From 4842e5441b2f6ccafdb487517091ec6db1d95765 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Mon, 23 Oct 2023 10:01:00 +0200 Subject: [PATCH 01/14] Fix warnings detected during build on ci with Visual 2019 --- modules/core/src/image/vpGaussianFilter.cpp | 12 +- modules/core/src/image/vpImageCircle.cpp | 85 ++--- modules/core/src/image/vpImageFilter.cpp | 8 +- .../test/tools/geometry/testImageCircle.cpp | 308 +++++++++--------- 4 files changed, 208 insertions(+), 205 deletions(-) diff --git a/modules/core/src/image/vpGaussianFilter.cpp b/modules/core/src/image/vpGaussianFilter.cpp index 0f77e22807..4e116085ab 100644 --- a/modules/core/src/image/vpGaussianFilter.cpp +++ b/modules/core/src/image/vpGaussianFilter.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Gaussian filter class - * -*****************************************************************************/ + */ #include #include @@ -88,7 +86,8 @@ class vpGaussianFilter::Impl if (!m_deinterleave) { SimdGaussianBlurRun(m_funcPtrRGBa, reinterpret_cast(I.bitmap), I.getWidth() * 4, reinterpret_cast(I_blur.bitmap), I_blur.getWidth() * 4); - } else { + } + else { vpImageConvert::split(I, &m_red, &m_green, &m_blue); SimdGaussianBlurRun(m_funcPtrGray, m_red.bitmap, m_red.getWidth(), m_redBlurred.bitmap, m_redBlurred.getWidth()); SimdGaussianBlurRun(m_funcPtrGray, m_green.bitmap, m_green.getWidth(), m_greenBlurred.bitmap, @@ -125,8 +124,7 @@ class vpGaussianFilter::Impl */ vpGaussianFilter::vpGaussianFilter(unsigned int width, unsigned int height, float sigma, bool deinterleave) : m_impl(new Impl(width, height, sigma, deinterleave)) -{ -} +{ } vpGaussianFilter::~vpGaussianFilter() { delete m_impl; } diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index 
b8e49af9bd..b0508f9b37 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Image circle, i.e. circle in the image space. - * -*****************************************************************************/ + */ #include @@ -53,30 +51,28 @@ vpImageCircle::vpImageCircle(const vpImagePoint ¢er, const float &radius) vpImageCircle::vpImageCircle(const cv::Vec3f &vec) : m_center(vec[1], vec[0]) , m_radius(vec[2]) -{ - -} +{ } #endif vpImageCircle::~vpImageCircle() -{ - -} +{ } /*! - * \brief Express \b theta between - Pi and Pi . + * \brief Express \b theta between \f$-\pi\f$ and \f$\pi\f$. * - * \param[in] theta The input angle we want to ensure it is in the interval [-Pi ; Pi] - * \return float The input angle in the interval [-Pi ; Pi] + * \param[in] theta The input angle we want to ensure it is in the interval \f$[-\pi ; \pi]\f$. + * \return The input angle in the interval \f$[-\pi ; \pi]\f$. */ float getAngleBetweenMinPiAndPi(const float &theta) { float theta1 = theta; - if (theta1 > M_PI) { - theta1 -= 2.0 * M_PI; + float pi = static_cast(M_PI); + + if (theta1 > pi) { + theta1 -= 2.0f * pi; } - else if (theta1 < -M_PI) { - theta1 += 2.0 * M_PI; + else if (theta1 < -pi) { + theta1 += 2.0f * pi; } return theta1; } @@ -122,7 +118,8 @@ void computeIntersectionsRightBorderOnly(const float &u_c, const float &umax_roi float theta2 = -1.f * theta1; float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); - delta_theta = 2.f * M_PI - (theta_max - theta_min); + float pi = static_cast(M_PI); + delta_theta = 2.f * pi - (theta_max - theta_min); } /*! 
@@ -140,24 +137,25 @@ void computeIntersectionsTopBorderOnly(const float &v_c, const float &vmin_roi, // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta1 = std::asin((v_c - vmin_roi) / radius); + float pi = static_cast(M_PI); theta1 = getAngleBetweenMinPiAndPi(theta1); float theta2 = 0.f; if (theta1 >= 0.f) { - theta2 = M_PI - theta1; + theta2 = pi - theta1; } else { - theta2 = -theta1 - M_PI; + theta2 = -theta1 - pi; } float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); if (std::abs(theta_max - theta_min) * radius < 1.f) { // Between the maximum and minimum theta there is less than 1 pixel of difference // It meens that the full circle is visible - delta_theta = 2.f * M_PI; + delta_theta = 2.f * pi; } else if (theta1 > 0.f) { - delta_theta = 2.f * M_PI - (theta_max - theta_min); + delta_theta = 2.f * pi - (theta_max - theta_min); } else { delta_theta = theta_max - theta_min; @@ -179,27 +177,28 @@ void computeIntersectionsBottomBorderOnly(const float &v_c, const float &vmax_ro // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta1 = std::asin((v_c - vmax_roi) / radius); + float pi = static_cast(M_PI); theta1 = getAngleBetweenMinPiAndPi(theta1); float theta2 = 0.f; if (theta1 >= 0.f) { - theta2 = M_PI - theta1; + theta2 = pi - theta1; } else { - theta2 = -theta1 - M_PI; + theta2 = -theta1 - pi; } float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); if (std::abs(theta_max - theta_min) * radius < 1.f) { // Between the maximum and minimum theta there is less than 1 pixel of difference - // It meens that the full circle is visible - delta_theta = 2.f * M_PI; + // It means that the full circle is visible + delta_theta = 2.f * pi; } else if (theta1 > 0.f) { delta_theta = theta_max - theta_min; } else { - delta_theta = 2.f * M_PI - (theta_max - theta_min); + delta_theta = 2.f * pi - (theta_max - theta_min); } } @@ -226,13 
+225,14 @@ void computePerpendicularAxesIntersections(const float &u_c, const float &v_c, c // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta_u_cross = std::asin((v_c - crossing_u)/radius); + float pi = static_cast(M_PI); theta_u_cross = getAngleBetweenMinPiAndPi(theta_u_cross); float theta_u_cross_2 = 0.f; if (theta_u_cross > 0) { - theta_u_cross_2 = M_PI - theta_u_cross; + theta_u_cross_2 = pi - theta_u_cross; } else { - theta_u_cross_2 = -M_PI - theta_u_cross; + theta_u_cross_2 = -pi - theta_u_cross; } // Computing the corresponding u-coordinates at which the u-axis is crossed float u_ucross = u_c + radius * std::cos(theta_u_cross); @@ -352,19 +352,20 @@ void computeIntersectionsTopRight(const float &u_c, const float &v_c, const floa float u_umax = crossing_theta_u_max.second; float v_vmin = crossing_theta_v_min.second; float v_vmax = crossing_theta_v_max.second; + float pi = static_cast(M_PI); if (u_umin <= umax_roi && v_vmin < vmin_roi && u_umax >= umax_roi && v_vmax >= vmin_roi) { // The circle crosses only once each axis and the center is below the top border //Case crossing once delta_theta = theta_v_max - theta_u_min; if (delta_theta < 0) { // The arc cannot be negative - delta_theta += 2.f * M_PI; + delta_theta += 2.f * pi; } } else if (u_umin <= umax_roi && v_vmin >= vmin_roi && u_umax <= umax_roi && v_vmax >= vmin_roi) { // The circle crosses twice each axis //Case crossing twice - delta_theta = 2 * M_PI - ((theta_u_min - theta_u_max)+(theta_v_min - theta_v_max)); + delta_theta = 2.f * pi - ((theta_u_min - theta_u_max)+(theta_v_min - theta_v_max)); } else if (u_umin >= umax_roi && v_vmin >= vmin_roi && u_umax >= umax_roi && v_vmax >= vmin_roi) { // The circle crosses the u-axis outside the roi @@ -458,19 +459,20 @@ void computeIntersectionsBottomRight(const float &u_c, const float &v_c, const f float u_umax = crossing_theta_u_max.second; float v_vmin = crossing_theta_v_min.second; float v_vmax = 
crossing_theta_v_max.second; + float pi = static_cast(M_PI); if (u_umin <= umax_roi && u_umax > umax_roi && v_vmin <= vmax_roi && v_vmax > vmax_roi) { // The circle crosses only once each axis //Case crossing once delta_theta = theta_u_min - theta_v_min; if (delta_theta < 0) { // An arc length cannot be negative it means that theta_u_max was comprise in the bottom left quadrant of the circle - delta_theta += 2.f * M_PI; + delta_theta += 2.f * pi; } } else if (u_umin <= umax_roi && u_umax <= umax_roi && v_vmin <= vmax_roi && v_vmax <= vmax_roi) { // The circle crosses twice each axis //Case crossing twice - delta_theta = 2.f * M_PI - ((theta_v_min - theta_v_max) + (theta_u_max - theta_u_min)); + delta_theta = 2.f * pi - ((theta_v_min - theta_v_max) + (theta_u_max - theta_u_min)); } else if (u_umin > umax_roi && u_umax > umax_roi && v_vmin <= vmax_roi && v_vmax <= vmax_roi) { // The circle crosses the u-axis outside the roi @@ -586,9 +588,10 @@ void computeIntersectionsTopRightBottom(const float &u_c, const float &v_c, cons float theta_u_max_bottom = crossing_theta_u_max.first; float u_umin_bottom = crossing_theta_u_min.second; float u_umax_bottom = crossing_theta_u_max.second; + float pi = static_cast(M_PI); if (u_umax_top <= umax_roi && u_umax_bottom <= umax_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { // case intersection top + right + bottom twice - delta_theta = 2.f * M_PI - ((theta_u_min_top - theta_u_max_top) + (theta_v_min - theta_v_max) + (theta_u_max_bottom - theta_u_min_bottom)); + delta_theta = 2.f * pi - ((theta_u_min_top - theta_u_max_top) + (theta_v_min - theta_v_max) + (theta_u_max_bottom - theta_u_min_bottom)); } else if (u_umin_top <= umax_roi && u_umax_top > umax_roi && v_vmin <= vmin_roi && u_umin_bottom <= umax_roi && u_umax_bottom > umax_roi && v_vmax >= vmax_roi) { // case intersection top and bottom @@ -628,11 +631,12 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const float theta_u_cross_top = 
std::asin((v_c - vmin_roi)/radius); theta_u_cross_top = getAngleBetweenMinPiAndPi(theta_u_cross_top); float theta_u_cross_top_2 = 0.f; + float pi = static_cast(M_PI); if (theta_u_cross_top > 0) { - theta_u_cross_top_2 = M_PI - theta_u_cross_top; + theta_u_cross_top_2 = pi - theta_u_cross_top; } else { - theta_u_cross_top_2 = -M_PI - theta_u_cross_top; + theta_u_cross_top_2 = -pi - theta_u_cross_top; } // Computing the corresponding u-coordinates at which the u-axis is crossed @@ -656,10 +660,10 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const theta_u_cross_bottom = getAngleBetweenMinPiAndPi(theta_u_cross_bottom); float theta_u_cross_bottom_2 = 0.f; if (theta_u_cross_bottom > 0) { - theta_u_cross_bottom_2 = M_PI - theta_u_cross_bottom; + theta_u_cross_bottom_2 = pi - theta_u_cross_bottom; } else { - theta_u_cross_bottom_2 = -M_PI - theta_u_cross_bottom; + theta_u_cross_bottom_2 = -pi - theta_u_cross_bottom; } // Computing the corresponding u-coordinates at which the u-axis is crossed @@ -679,7 +683,7 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const // Computing the the length of the angular interval of the circle when it intersects // only with the top and bottom borders of the Region of Interest (RoI) - delta_theta = 2.f * M_PI - ((theta_u_cross_top_min - theta_u_cross_top_max) + (theta_u_cross_bottom_max - theta_u_cross_bottom_min)); + delta_theta = 2.f * pi - ((theta_u_cross_top_min - theta_u_cross_top_max) + (theta_u_cross_bottom_max - theta_u_cross_bottom_min)); } /*! 
@@ -938,11 +942,12 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const bool touchBottomBorder = (v_c + radius) >= vmax_roi; bool isHorizontallyOK = (!touchLeftBorder && !touchRightBorder); bool isVerticallyOK = (!touchTopBorder && !touchBottomBorder); + float pi = static_cast(M_PI); if (isHorizontallyOK && isVerticallyOK && roi.isInside(m_center)) { // Easy case // The circle has its center in the image and its radius is not too great // to make it fully contained in the RoI - delta_theta = 2.f * M_PI; + delta_theta = 2.f * pi; } else if (touchBottomBorder && !touchLeftBorder && !touchRightBorder && !touchTopBorder) { // Touches/intersects only the bottom border of the RoI diff --git a/modules/core/src/image/vpImageFilter.cpp b/modules/core/src/image/vpImageFilter.cpp index 63da6cc2e2..f7a0119d16 100644 --- a/modules/core/src/image/vpImageFilter.cpp +++ b/modules/core/src/image/vpImageFilter.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Various image tools, convolution, ... 
- * -*****************************************************************************/ + */ #include #include @@ -373,7 +371,7 @@ void vpImageFilter::canny(const vpImage &Isrc, vpImage #include @@ -47,11 +45,12 @@ bool compareAngles(const float &actualVal, const float &theoreticalVal) float ensureIsBetweenMinPiAndPi(const float &theta) { float theta1 = theta; - if (theta1 > M_PI) { - theta1 -= 2.0 * M_PI; + float pi = static_cast(M_PI); + if (theta1 > pi) { + theta1 -= 2.0 * pi; } - else if (theta1 < -M_PI) { - theta1 += 2.0 * M_PI; + else if (theta1 < -pi) { + theta1 += 2.0 * pi; } return theta1; } @@ -67,12 +66,15 @@ int main() const float HEIGHT_SWITCHED = WIDTH; // The RoI must be inverted in order to cross left and right axes while crossing only the top axis vpRect switchedRoI(OFFSET, OFFSET, WIDTH_SWITCHED, HEIGHT_SWITCHED); bool hasSucceeded = true; + float pi = static_cast(M_PI); + float pi_2 = static_cast(M_PI_2); + float pi_4 = static_cast(M_PI_4); // Test with no intersections { vpImageCircle circle(vpImagePoint(HEIGHT / 2.f, WIDTH / 2.f), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -94,7 +96,7 @@ int main() vpRect roiSquare(OFFSET, OFFSET, HEIGHT, HEIGHT); vpImageCircle circle(vpImagePoint(OFFSET + HEIGHT / 2.f, OFFSET + HEIGHT / 2.f), HEIGHT / 2.f); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * HEIGHT / 2.f; + float theoreticalValue = 2.f * pi * HEIGHT / 2.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -119,7 +121,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * M_PI * RADIUS 
/3.f; + float theoreticalValue = 4.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -144,7 +146,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS /3.f; + float theoreticalValue = 2.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -169,7 +171,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -194,7 +196,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * M_PI * RADIUS /3.f; + float theoreticalValue = 4.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -219,7 +221,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS /3.f; + float theoreticalValue = 2.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -244,7 +246,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, 
theoreticalValue); std::string statusTest; if (isValueOK) { @@ -265,12 +267,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = M_PI / 3.f; + float theta = pi / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * M_PI * RADIUS /3.f; + float theoreticalValue = 5.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -291,12 +293,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = -2.f * M_PI/3.f; + float theta = -2.f * pi/3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI * RADIUS /3.f; + float theoreticalValue = pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -317,12 +319,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = M_PI_2; + float theta = pi_2; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -343,12 +345,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = -M_PI / 3.f; + float theta = -pi / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), 
RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * M_PI * RADIUS /3.f; + float theoreticalValue = 5.f * pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -369,12 +371,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = M_PI / 3.f; + float theta = pi / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI * RADIUS /3.f; + float theoreticalValue = pi * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -398,7 +400,7 @@ int main() float vc = OFFSET + HEIGHT - RADIUS; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; + float theoreticalValue = 2.f * pi * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -425,7 +427,7 @@ int main() float vc = OFFSET; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI_2 * RADIUS; + float theoreticalValue = pi_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -451,13 +453,13 @@ int main() // (4): umin = uc + r cos(theta_v_max) ; v_cross_max = vc - r sin(theta_v_max) >= vmin && <= vmin + height // (3) & (4) => uc = umin - r cos(theta_v_min) = umin - r cos(theta_v_max) <=> theta_v_min = - theta_v_max // (3) & (4) => vc >= vmin + r sin(theta_v_min) && vc >= vmin + r sin (theta_v_max) - float theta_v_min = M_PI / 4.f; + float theta_v_min = pi / 4.f; float uc = OFFSET - 
RADIUS * std::cos(theta_v_min); float vc = OFFSET + RADIUS * std::sin(theta_v_min) + 1.f; vc = std::max(vc, OFFSET + RADIUS * std::sin(-theta_v_min) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI_2 * RADIUS; + float theoreticalValue = pi_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -485,13 +487,13 @@ int main() // (1) => uc + r cos(theta_u_top_min) >= umin <=> uc >= umin - r cos(theta_u_top_min) // (2) => uc + r cos(theta_u_top_max) >= umin <=> uc >= umin - r cos(theta_u_top_max) - float theta_u_top_min = -1.1f * M_PI_2; + float theta_u_top_min = -1.1f * pi_2; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos(pi - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 0.2f * M_PI_2 * RADIUS; + float theoreticalValue = 0.2f * pi_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -521,10 +523,10 @@ int main() // (3) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (3) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = pi - theta_u_top_min; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos(pi - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * 
std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET - uc)/RADIUS); theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); @@ -564,13 +566,13 @@ int main() // (1) => vc = vmin + r sin(theta_u_top_min) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_v_max = -M_PI_2; + float theta_u_top_min = 2.f * pi / 3.f; + float theta_v_max = -pi_2; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; + float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -599,13 +601,13 @@ int main() // (1) <=> asin((vc - vmin)/r) >= acos[(umin + width - uc)/r] <=> vc >= r sin(acos[(umin + width - uc)/r]) + vmin // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_max = -7.f * M_PI / 8.f; + float theta_v_max = -7.f * pi / 8.f; float theta_v_min = -theta_v_max; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = RADIUS * std::sin(std::acos((OFFSET + WIDTH - uc)/RADIUS)) + OFFSET + 1.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * RADIUS; + float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -634,8 +636,8 @@ int main() // Choice: theta_u_top_min = -0.9 * PI / 2 // (1) => vc = vmin + r sin(theta_u_top_min) // (2) vc - r sin(theta_v_min) <= vmin => asin((vc - vmin)/r) <= theta_v_min - float theta_u_top_min = 
-0.9f * M_PI_2; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = -0.9f * pi_2; + float theta_u_top_max = pi - theta_u_top_min; theta_u_top_max = ensureIsBetweenMinPiAndPi(theta_u_top_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::asin((vc - OFFSET)/RADIUS) + 1.f; @@ -672,10 +674,10 @@ int main() // (2) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (2) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = pi - theta_u_top_min; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_u_top_min) - 1.f; - uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI - theta_u_top_min) - 1.f); + uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos(pi - theta_u_top_min) - 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET + WIDTH - uc)/RADIUS); theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); @@ -687,7 +689,7 @@ int main() } vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -715,13 +717,13 @@ int main() // (3) => vc = vmin + height + r sin(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = M_PI_2; - float theta_u_bot_max = -M_PI / 3.f; + float theta_v_min = pi_2; + float theta_u_bot_max = -pi / 3.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float 
vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_max);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; + float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -751,7 +753,7 @@ int main() // (4) => vc <= vmin + height + r sin(theta_v_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = M_PI_4 / 2.f; + float theta_v_min = pi_4 / 2.f; float theta_v_max = -theta_v_min; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(theta_v_max) - 1.f); @@ -787,8 +789,8 @@ int main() // (1) => uc >= umin - r cos(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 5.f * M_PI_4 / 2.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_u_bot_min = 5.f * pi_4 / 2.f; + float theta_u_bot_max = pi - theta_u_bot_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = std::max(OFFSET - RADIUS * std::cos(theta_u_bot_min) + 1.f, OFFSET - RADIUS * std::cos(theta_u_bot_max) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); @@ -823,10 +825,10 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -5.f * M_PI / 8.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_u_bot_min = -5.f * pi / 8.f; + float theta_u_bot_max = pi - theta_u_bot_min; theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_min = 7.f * M_PI / 8.f; + float theta_v_min = 7.f * pi 
/ 8.f; float theta_v_max = -theta_v_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); @@ -860,13 +862,13 @@ int main() // (1) => vc = vmin + height + r sin(theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -2.f * M_PI / 3.f; - float theta_v_min = M_PI_2; + float theta_u_bot_min = -2.f * pi / 3.f; + float theta_v_min = pi_2; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; + float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -895,12 +897,12 @@ int main() // (2) & (4) => vc <= vmin + height + r sin(theta_v_min) & vc <= vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = 5.f * M_PI / 6.f; + float theta_v_min = 5.f * pi / 6.f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(-theta_v_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f + float theoreticalValue = (pi / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -929,12 +931,12 @@ int main() // (1) & (3) => uc < umin + width - r cos(theta_u_bot_min) & uc <= umin + width - r cos(PI - theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - 
theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 4.f * M_PI / 6.f; + float theta_u_bot_min = 4.f * pi / 6.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); - float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI -theta_u_bot_min) - 1.f); + float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)pi -theta_u_bot_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f + float theoreticalValue = (pi / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -964,16 +966,16 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -7.f * M_PI / 8.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_u_bot_min = -7.f * pi / 8.f; + float theta_u_bot_max = pi - theta_u_bot_min; theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_max = -3.f * M_PI / 8.f; + float theta_v_max = -3.f * pi / 8.f; float theta_v_min = -theta_v_max; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; + float theoreticalValue = (2.f * pi - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); 
std::string statusTest; if (isValueOK) { @@ -1002,12 +1004,12 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = 3.f * M_PI / 8.f; - float theta_v_min = 7.f * M_PI / 8.f; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = 3.f * pi / 8.f; + float theta_v_min = 7.f * pi / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * M_PI / 8.f; - float theta_u_bottom_max = -3.f * M_PI / 8.f; + float theta_u_bottom_min = -5.f * pi / 8.f; + float theta_u_bottom_max = -3.f * pi / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET - radius * std::cos(theta_v_min); @@ -1042,9 +1044,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = M_PI / 6.f; - float theta_u_top_min = M_PI - theta_u_top_max; - float theta_v_min = M_PI / 3.f; + float theta_u_top_max = pi / 6.f; + float theta_u_top_min = pi - theta_u_top_max; + float theta_v_min = pi / 3.f; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1080,9 +1082,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * M_PI / 6.f; - float theta_u_top_max = M_PI - theta_u_top_min; - float theta_v_min = M_PI; + float theta_u_top_min = 4.f * pi / 6.f; + float theta_u_top_max = pi - theta_u_top_min; + float theta_v_min = pi; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT / (2.f * std::sin(theta_u_top_min)); // vmin + h - vmin = (vc - r 
sin(-theta_u_top_min)) - (vc - r sin(theta_top_min)) @@ -1090,7 +1092,7 @@ int main() float uc = OFFSET - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1119,8 +1121,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = M_PI_4; + float theta_u_top_min = pi_2; + float theta_v_min = pi_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1156,8 +1158,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = 3.f * M_PI_4; + float theta_u_top_min = pi_2; + float theta_v_min = 3.f * pi_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1194,8 +1196,8 @@ int main() // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max float theta_u_top_max = 0.f; - float theta_u_bot_max = -M_PI / 3.f; - float theta_v_max = -M_PI / 6.f; + float theta_u_bot_max = -pi / 3.f; + float theta_v_max = -pi / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1230,9 
+1232,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = M_PI / 3.f; + float theta_u_top_max = pi / 3.f; float theta_u_bot_max = 0.f; - float theta_v_min = M_PI / 6.f; + float theta_v_min = pi / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1267,18 +1269,18 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = 3.f * M_PI / 8.f; - float theta_v_min = 1.f * M_PI / 8.f; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = 3.f * pi / 8.f; + float theta_v_min = 1.f * pi / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * M_PI / 8.f; - float theta_u_bottom_max = -3.f * M_PI / 8.f; + float theta_u_bottom_min = -5.f * pi / 8.f; + float theta_u_bottom_max = -3.f * pi / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1307,15 +1309,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = 
- theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 6.f; - float theta_v_min = 2.f * M_PI / 3.f; + float theta_u_top_min = 5.f * pi / 6.f; + float theta_v_min = 2.f * pi / 3.f; float theta_u_bottom_min = -theta_u_top_min; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_u_bottom_min)) * radius; + float theoreticalValue = (2.f * pi - (theta_u_top_min - theta_u_bottom_min)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1344,8 +1346,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * M_PI / 6.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 4.f * pi / 6.f; + float theta_u_top_max = pi - theta_u_top_min; float theta_v_min = 0; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; @@ -1354,7 +1356,7 @@ int main() float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1383,15 +1385,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - 
theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = 3.f * M_PI_4; + float theta_u_top_min = pi_2; + float theta_v_min = 3.f * pi_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1420,15 +1422,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = M_PI_4; + float theta_u_top_min = pi_2; + float theta_v_min = pi_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1457,15 +1459,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI; - float theta_u_bot_min = -2.f * M_PI / 3.f; - float theta_v_max = -5.f * M_PI / 6.f; + float theta_u_top_min = pi; + float theta_u_bot_min = -2.f * pi / 
3.f; + float theta_v_max = -5.f * pi / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * pi - (theta_u_top_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1494,9 +1496,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_u_bot_min = M_PI; - float theta_v_min = 5.f * M_PI / 6.f; + float theta_u_top_min = 2.f * pi / 3.f; + float theta_u_bot_min = pi; + float theta_v_min = 5.f * pi / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1533,12 +1535,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * pi / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 8.f; + float theta_v_right_min = pi / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * pi / 8.f; + float theta_u_top_max = pi - theta_u_top_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + 
radius * std::sin(theta_u_top_min); @@ -1574,12 +1576,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * M_PI / 3.f; + float theta_u_top_min = -2.f * pi / 3.f; float uc = OFFSET + WIDTH_SWITCHED/2.f; float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); - float theoreticalValue = (M_PI/3.f) * RADIUS; + float theoreticalValue = (pi/3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1610,9 +1612,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_max = -5.f * M_PI / 8.f; - float theta_v_right_max = -3.f *M_PI / 8.f; - float theta_u_top_min = -7.f * M_PI / 8.f; + float theta_v_left_max = -5.f * pi / 8.f; + float theta_v_right_max = -3.f *pi / 8.f; + float theta_u_top_min = -7.f * pi / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET - radius * std::cos(theta_v_left_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1649,9 +1651,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_max = -M_PI / 3.f; + float theta_u_top_max = -pi / 3.f; float theta_v_right_max = 0.f; - float theta_v_left_max = -M_PI_2; + float theta_v_left_max = -pi_2; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET; float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1688,9 +1690,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * M_PI / 3.f; - float theta_v_left_max = M_PI; - float 
theta_v_right_max = -M_PI_2; + float theta_u_top_min = -2.f * pi / 3.f; + float theta_v_left_max = pi; + float theta_v_right_max = -pi_2; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1727,12 +1729,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * pi / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 8.f; + float theta_v_right_min = pi / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_bot_min = -5.f * M_PI / 8.f; - float theta_u_bot_max = -M_PI - theta_u_bot_min; + float theta_u_bot_min = -5.f * pi / 8.f; + float theta_u_bot_max = -pi - theta_u_bot_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1768,10 +1770,10 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * M_PI / 3.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; - float theta_v_left_min = 5.f * M_PI / 6.f; - float theta_v_right_min = M_PI / 6.f; + float theta_u_bot_min = 2.f * pi / 3.f; + float theta_u_bot_max = pi - theta_u_bot_min; + float theta_v_left_min = 5.f * pi / 6.f; + float theta_v_right_min = pi / 6.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1807,9 +1809,9 @@ int main() // (1) & (3) theta_u_top_min = PI - 
theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 7.f * M_PI / 8.f; - float theta_v_left_min = 5.f * M_PI / 8.f; - float theta_v_right_min = 3.f * M_PI / 8.f; + float theta_u_bot_min = 7.f * pi / 8.f; + float theta_v_left_min = 5.f * pi / 8.f; + float theta_v_right_min = 3.f * pi / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1845,8 +1847,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_max = M_PI / 3.f; - float theta_v_left_min = M_PI_2; + float theta_u_bot_max = pi / 3.f; + float theta_v_left_min = pi_2; float theta_v_right_min = 0.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET; @@ -1883,9 +1885,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * M_PI / 3.f; - float theta_v_right_min = M_PI_2; - float theta_v_left_min = M_PI; + float theta_u_bot_min = 2.f * pi / 3.f; + float theta_v_right_min = pi_2; + float theta_v_left_min = pi; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1917,17 +1919,17 @@ int main() // (6): u_cross_bot_max = uc + r cos(theta_u_bottom_max) <= umin_roi + width ; vmin_roi + height = vc - r sin(theta_u_bottom_max) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float 
theta_u_top_min = 2.f * M_PI / 3.f; - float theta_u_top_max = M_PI / 3.f; - float theta_u_bottom_min = -2.f * M_PI / 3.f; - float theta_u_bottom_max = -M_PI / 3.f; + float theta_u_top_min = 2.f * pi / 3.f; + float theta_u_top_max = pi / 3.f; + float theta_u_bottom_min = -2.f * pi / 3.f; + float theta_u_bottom_max = -pi / 3.f; float uc = OFFSET + WIDTH / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1953,9 +1955,9 @@ int main() // (6): u_min + width = uc + r cos(theta_v_right_max); v_cross_right_max = vc - r sin(theta_v_right_max) // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 5.f * M_PI / 6.f; + float theta_v_left_min = 5.f * pi / 6.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 6.f; + float theta_v_right_min = pi / 6.f; float theta_v_right_max = -theta_v_right_min; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + WIDTH / 2.f; @@ -1985,14 +1987,14 @@ int main() // Choosing theta_v_left_min = 7 PI / 8 and circle at the center of the RoI // umin = uc + r cos(theta_v_left_min) => r = (umin - uc) / cos(theta_v_left_min) vpRect squareRoI(OFFSET, OFFSET, HEIGHT, HEIGHT); - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * pi / 8.f; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = (OFFSET - uc) / std::cos(theta_v_left_min); vpImageCircle circle(vpImagePoint(vc, 
uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(squareRoI); - float theoreticalValue = M_PI * radius; + float theoreticalValue = pi * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { From 44ec8e987c6b38af54de0fa8f461cf2d25133ffd Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Tue, 24 Oct 2023 18:47:29 +0200 Subject: [PATCH 02/14] Improve code quality by fixing all the warnings when -Wsuggest-destructor-override is used - Changes done to add override keyword when necessary, removed useless destructors - Doc improvement in the same commit --- 3rdparty/pthreads4w/ChangeLog | 210 ++-- .../tutorial-tracking-mb-generic-json.dox | 2 +- .../visual-servo/tutorial-pixhawk-vs.dox | 2 +- example/math/BSpline.cpp | 2 +- .../servoAfma6FourPoints2DArtVelocity.cpp | 4 +- ...ervoAfma6FourPoints2DCamVelocityLs_cur.cpp | 2 +- ...ervoAfma6FourPoints2DCamVelocityLs_des.cpp | 2 +- ...oViper850FourPoints2DArtVelocityLs_cur.cpp | 2 +- ...oViper850FourPoints2DArtVelocityLs_des.cpp | 4 +- ...oViper850FourPoints2DCamVelocityLs_cur.cpp | 2 +- .../servoViper850FourPointsKinect.cpp | 2 +- modules/ar/CMakeLists.txt | 2 +- .../include/visp3/core/vpCameraParameters.h | 130 +-- modules/core/include/visp3/core/vpCircle.h | 93 +- modules/core/include/visp3/core/vpClient.h | 276 +++-- modules/core/include/visp3/core/vpColVector.h | 4 - modules/core/include/visp3/core/vpCylinder.h | 185 ++-- modules/core/include/visp3/core/vpDisplay.h | 940 +++++++++--------- modules/core/include/visp3/core/vpException.h | 12 +- .../include/visp3/core/vpForceTwistMatrix.h | 7 +- .../include/visp3/core/vpForwardProjection.h | 223 ++--- .../core/include/visp3/core/vpFrameGrabber.h | 112 +-- .../include/visp3/core/vpHistogramValey.h | 9 +- .../include/visp3/core/vpHomogeneousMatrix.h | 4 - modules/core/include/visp3/core/vpLine.h | 197 ++-- .../core/vpLinearKalmanFilterInstantiation.h | 15 +- modules/core/include/visp3/core/vpMath.h 
| 92 +- modules/core/include/visp3/core/vpMatrix.h | 3 - .../include/visp3/core/vpMatrixException.h | 2 +- .../visp3/core/vpMeterPixelConversion.h | 36 +- .../core/include/visp3/core/vpMomentAlpha.h | 327 +++--- .../core/include/visp3/core/vpMomentArea.h | 30 +- .../visp3/core/vpMomentAreaNormalized.h | 218 ++-- .../core/include/visp3/core/vpMomentBasic.h | 1 - .../include/visp3/core/vpMomentCInvariant.h | 21 +- .../include/visp3/core/vpMomentCentered.h | 3 +- .../core/include/visp3/core/vpMomentCommon.h | 22 +- .../include/visp3/core/vpMomentDatabase.h | 23 +- .../visp3/core/vpMomentGravityCenter.h | 11 +- .../core/vpMomentGravityCenterNormalized.h | 1 - .../core/include/visp3/core/vpMomentObject.h | 2 +- .../visp3/core/vpPixelMeterConversion.h | 42 +- modules/core/include/visp3/core/vpPoint.h | 25 +- .../core/include/visp3/core/vpPoseVector.h | 4 - .../include/visp3/core/vpQuaternionVector.h | 27 +- modules/core/include/visp3/core/vpRequest.h | 2 +- .../include/visp3/core/vpRotationMatrix.h | 5 - .../include/visp3/core/vpRotationVector.h | 11 +- modules/core/include/visp3/core/vpRowVector.h | 19 +- .../core/include/visp3/core/vpRxyzVector.h | 3 - .../core/include/visp3/core/vpRzyxVector.h | 4 - .../core/include/visp3/core/vpRzyzVector.h | 3 - modules/core/include/visp3/core/vpServer.h | 10 +- modules/core/include/visp3/core/vpSphere.h | 85 +- .../core/include/visp3/core/vpSubColVector.h | 41 +- modules/core/include/visp3/core/vpSubMatrix.h | 4 +- .../core/include/visp3/core/vpSubRowVector.h | 42 +- .../core/include/visp3/core/vpThetaUVector.h | 2 - modules/core/include/visp3/core/vpTracker.h | 45 +- .../include/visp3/core/vpTranslationVector.h | 4 +- modules/core/include/visp3/core/vpUDPClient.h | 220 ++-- .../visp3/core/vpVelocityTwistMatrix.h | 9 +- .../core/src/camera/vpCameraParameters.cpp | 673 ++++++------- .../src/camera/vpMeterPixelConversion.cpp | 10 +- .../src/camera/vpPixelMeterConversion.cpp | 16 +- modules/core/src/image/vpRGBa.cpp | 2 +- 
modules/core/src/image/vpRGBf.cpp | 2 +- .../vpLinearKalmanFilterInstantiation.cpp | 12 +- modules/core/src/math/matrix/vpColVector.cpp | 2 +- .../core/src/math/matrix/vpSubColVector.cpp | 108 +- .../core/src/math/matrix/vpSubRowVector.cpp | 100 +- modules/core/src/tools/network/vpClient.cpp | 39 +- .../forward-projection/vpCylinder.cpp | 5 - .../tracking/forward-projection/vpSphere.cpp | 11 +- .../src/tracking/moments/vpMomentCommon.cpp | 118 +-- modules/core/test/tools/xml/testXmlParser.cpp | 139 ++- .../visp3/detection/vpDetectorAprilTag.h | 352 +++---- .../include/visp3/detection/vpDetectorBase.h | 57 +- .../visp3/detection/vpDetectorDNNOpenCV.h | 48 +- .../detection/vpDetectorDataMatrixCode.h | 118 ++- .../include/visp3/detection/vpDetectorFace.h | 80 +- .../visp3/detection/vpDetectorQRCode.h | 123 ++- modules/gui/include/visp3/gui/vpD3DRenderer.h | 27 +- modules/gui/include/visp3/gui/vpDisplayD3D.h | 122 ++- modules/gui/include/visp3/gui/vpDisplayGDI.h | 158 ++- modules/gui/include/visp3/gui/vpDisplayGTK.h | 81 +- .../gui/include/visp3/gui/vpDisplayOpenCV.h | 246 +++-- .../gui/include/visp3/gui/vpDisplayWin32.h | 111 +-- modules/gui/include/visp3/gui/vpDisplayX.h | 80 +- modules/gui/include/visp3/gui/vpGDIRenderer.h | 2 +- modules/gui/include/visp3/gui/vpPlot.h | 141 ++- modules/gui/src/display/vpDisplayGTK.cpp | 105 +- .../gui/src/display/windows/vpDisplayD3D.cpp | 9 +- .../gui/src/display/windows/vpDisplayGDI.cpp | 9 +- .../imgproc/src/vpCircleHoughTransform.cpp | 2 +- modules/io/include/visp3/io/vpDiskGrabber.h | 126 +-- modules/io/include/visp3/io/vpVideoReader.h | 343 +++---- modules/io/src/video/vpDiskGrabber.cpp | 2 - modules/io/src/video/vpVideoReader.cpp | 126 ++- modules/io/src/video/vpVideoWriter.cpp | 2 +- modules/robot/include/visp3/robot/vpAfma4.h | 4 +- modules/robot/include/visp3/robot/vpAfma6.h | 4 +- modules/robot/include/visp3/robot/vpPioneer.h | 150 ++- .../robot/include/visp3/robot/vpPioneerPan.h | 187 ++-- 
.../robot/include/visp3/robot/vpQbSoftHand.h | 93 +- .../robot/include/visp3/robot/vpRobotAfma4.h | 10 +- .../robot/include/visp3/robot/vpRobotAfma6.h | 14 +- .../include/visp3/robot/vpRobotBiclops.h | 16 +- .../robot/include/visp3/robot/vpRobotCamera.h | 15 +- .../include/visp3/robot/vpRobotFlirPtu.h | 12 +- .../robot/include/visp3/robot/vpRobotFranka.h | 347 ++++--- .../robot/include/visp3/robot/vpRobotKinova.h | 14 +- .../include/visp3/robot/vpRobotPioneer.h | 94 +- .../robot/include/visp3/robot/vpRobotPtu46.h | 10 +- .../include/visp3/robot/vpRobotSimulator.h | 54 +- .../include/visp3/robot/vpRobotTemplate.h | 46 +- .../visp3/robot/vpRobotUniversalRobots.h | 12 +- .../include/visp3/robot/vpRobotViper650.h | 15 +- .../include/visp3/robot/vpRobotViper850.h | 17 +- .../visp3/robot/vpRobotWireFrameSimulator.h | 193 ++-- .../include/visp3/robot/vpSimulatorAfma6.h | 286 +++--- .../include/visp3/robot/vpSimulatorCamera.h | 133 ++- .../include/visp3/robot/vpSimulatorPioneer.h | 122 ++- .../visp3/robot/vpSimulatorPioneerPan.h | 125 ++- .../include/visp3/robot/vpSimulatorViper850.h | 336 ++++--- .../robot/include/visp3/robot/vpUnicycle.h | 96 +- modules/robot/include/visp3/robot/vpViper.h | 4 +- .../robot/include/visp3/robot/vpViper650.h | 123 ++- .../robot/include/visp3/robot/vpViper850.h | 124 ++- .../haptic-device/qbdevice/vpQbSoftHand.cpp | 23 +- .../robot/src/real-robot/viper/vpViper.cpp | 2 +- .../robot/src/real-robot/viper/vpViper650.cpp | 2 +- .../robot/src/real-robot/viper/vpViper850.cpp | 2 +- .../src/robot-simulator/vpRobotCamera.cpp | 8 +- .../vpRobotWireFrameSimulator.cpp | 47 +- .../src/robot-simulator/vpSimulatorCamera.cpp | 6 - .../robot-simulator/vpSimulatorPioneer.cpp | 10 +- .../robot-simulator/vpSimulatorPioneerPan.cpp | 10 +- .../sensor/vpForceTorqueAtiNetFTSensor.h | 111 +-- .../visp3/sensor/vpForceTorqueAtiSensor.h | 89 +- .../sensor/include/visp3/sensor/vpSickLDMRS.h | 108 +- .../tracker/blob/include/visp3/blob/vpDot.h | 282 +++--- 
.../tracker/blob/include/visp3/blob/vpDot2.h | 392 ++++---- modules/tracker/blob/src/dots/vpDot2.cpp | 166 ++-- .../include/visp3/mbt/vpMbDepthDenseTracker.h | 46 +- .../visp3/mbt/vpMbDepthNormalTracker.h | 46 +- .../include/visp3/mbt/vpMbEdgeKltTracker.h | 67 +- .../mbt/include/visp3/mbt/vpMbEdgeTracker.h | 478 +++++---- .../include/visp3/mbt/vpMbGenericTracker.h | 155 ++- .../mbt/include/visp3/mbt/vpMbHiddenFaces.h | 46 +- .../mbt/include/visp3/mbt/vpMbKltTracker.h | 52 +- .../mbt/include/visp3/mbt/vpMbTracker.h | 43 +- .../mbt/include/visp3/mbt/vpMbtMeEllipse.h | 27 +- .../mbt/include/visp3/mbt/vpMbtMeLine.h | 56 +- .../mbt/include/visp3/mbt/vpMbtPolygon.h | 102 +- .../tracker/mbt/src/edge/vpMbtMeEllipse.cpp | 4 - .../tracker/mbt/src/vpMbGenericTracker.cpp | 2 - modules/tracker/mbt/src/vpMbTracker.cpp | 4 +- modules/tracker/mbt/src/vpMbtPolygon.cpp | 20 +- .../tracker/me/include/visp3/me/vpMeEllipse.h | 3 +- .../tracker/me/include/visp3/me/vpMeLine.h | 2 +- .../tracker/me/include/visp3/me/vpMeNurbs.h | 5 - .../tracker/me/include/visp3/me/vpMeTracker.h | 2 +- modules/tracker/me/include/visp3/me/vpNurbs.h | 4 - .../tracker/me/src/moving-edges/vpMeNurbs.cpp | 2 - .../tracker/me/src/moving-edges/vpNurbs.cpp | 2 - modules/tracker/me/test/testNurbs.cpp | 2 +- .../tt/vpTemplateTrackerWarpHomographySL3.h | 28 +- .../vpTemplateTrackerWarpHomographySL3.cpp | 7 +- .../include/visp3/tt_mi/vpTemplateTrackerMI.h | 37 +- .../include/visp3/vision/vpHomography.h | 2 - .../include/visp3/vision/vpPoseFeatures.h | 20 +- .../visp3/visual_features/vpBasicFeature.h | 19 +- .../visp3/visual_features/vpFeatureDepth.h | 239 +++-- .../visp3/visual_features/vpFeatureEllipse.h | 39 +- .../visp3/visual_features/vpFeatureLine.h | 328 +++--- .../visual_features/vpFeatureLuminance.h | 55 +- .../visp3/visual_features/vpFeatureMoment.h | 320 +++--- .../visual_features/vpFeatureMomentAlpha.h | 161 ++- .../visual_features/vpFeatureMomentArea.h | 71 +- .../vpFeatureMomentAreaNormalized.h | 279 
+++--- .../visual_features/vpFeatureMomentBasic.h | 90 +- .../vpFeatureMomentCInvariant.h | 383 ++++--- .../visual_features/vpFeatureMomentCentered.h | 102 +- .../visual_features/vpFeatureMomentCommon.h | 371 ++++--- .../visual_features/vpFeatureMomentDatabase.h | 236 +++-- .../vpFeatureMomentGravityCenter.h | 365 ++++--- .../vpFeatureMomentGravityCenterNormalized.h | 402 ++++---- .../visp3/visual_features/vpFeaturePoint.h | 278 +++--- .../visp3/visual_features/vpFeaturePoint3D.h | 337 +++---- .../visual_features/vpFeaturePointPolar.h | 432 ++++---- .../visp3/visual_features/vpFeatureSegment.h | 241 +++-- .../visp3/visual_features/vpFeatureThetaU.h | 358 ++++--- .../visual_features/vpFeatureTranslation.h | 474 +++++---- .../visual_features/vpFeatureVanishingPoint.h | 71 +- .../visp3/visual_features/vpGenericFeature.h | 277 +++--- .../vpFeatureBuilderSegment.cpp | 88 +- .../src/visual-feature/vpFeatureMoment.cpp | 147 ++- .../visual-feature/vpFeatureMomentAlpha.cpp | 49 +- .../vpFeatureMomentAreaNormalized.cpp | 57 +- .../visual-feature/vpFeatureMomentBasic.cpp | 64 +- .../vpFeatureMomentCInvariant.cpp | 672 +++++++------ .../vpFeatureMomentCentered.cpp | 111 +-- .../visual-feature/vpFeatureMomentCommon.cpp | 39 +- .../vpFeatureMomentDatabase.cpp | 58 +- .../vpFeatureMomentGravityCenter.cpp | 56 +- ...vpFeatureMomentGravityCenterNormalized.cpp | 65 +- .../src/visual-feature/vpFeatureSegment.cpp | 588 ++++++----- .../src/visual-feature/vpGenericFeature.cpp | 50 +- 209 files changed, 10114 insertions(+), 10585 deletions(-) diff --git a/3rdparty/pthreads4w/ChangeLog b/3rdparty/pthreads4w/ChangeLog index 6f0913ef4d..d36a4b66a2 100644 --- a/3rdparty/pthreads4w/ChangeLog +++ b/3rdparty/pthreads4w/ChangeLog @@ -30,7 +30,7 @@ * implement.h (NEED_FTIME): remove conditionals. * pthread.h: Remove Borland compiler time types no longer needed. * configure.ac (NEED_FTIME): Removed check. - + 2018-08-07 Ross Johnson * GNUmakefile.in (DLL_VER): rename as PTW32_VER. 
@@ -63,7 +63,7 @@ all-tests-cflags. * Makefile (all-tests-cflags): retain; require all-tests-md and all-tests-mt. - + 2016-12-25 Ross Johnson * Change all license notices to the Apache License 2.0 @@ -110,11 +110,11 @@ * _ptw32.h: MINGW(all) include stdint.h to define all specific size integers (int64_t etc). - + 2016-12-17 Kyle Schwarz * _ptw32.h: MINGW6464 define pid_t as __int64. - + 2016-04-01 Ross Johnson * _ptw32.h: Move more header stuff into here. @@ -154,7 +154,7 @@ * pthread_mutex_init.c: Memory allocation of robust mutex element was not being checked. - + 2016-02-29 Ross Johnson * GNUmakefile (MINGW_HAVE_SECURE_API): Moved to config.h. Undefined @@ -223,7 +223,7 @@ * create.c: Don't apply cpu affinity from thread attributes for WINCE; bug fix. - + 2013-07-23 Ross Johnson * config.h (HAVE_CPU_AFFINITY): Defined. @@ -304,7 +304,7 @@ 2012-10-24 Stephane Clairet * pthread_key_delete.c: Bug fix - move keylock release to after the - while loop. (This bug first was introduced at release 2.9.1) + while loop. (This bug first was introduced at release 2.9.1) 2012-10-16 Ross Johnson @@ -327,7 +327,7 @@ * sched.h (cpu_set_t): Redefined. * implement.h (_sched_cpu_set_vector_): Created as the private equivalent of cpu_set. - (pthread_thread_t_.cpuset): Type change. + (pthread_thread_t_.cpuset): Type change. * sched_setaffinity.c: Reflect changes to cpu_set_t and _sched_cpu_set_vector_. * pthread_setaffinity.c: Likewise. * create.c: Likewise. @@ -461,14 +461,14 @@ * sched.h (DWORD_PTR): As above; other changes. * sem_post.c: Fix errno handling and restructure. * sem_getvalue.c: Fix return value and restructure. - + 2012-09-18 Ross Johnson * sched_setaffinity.c: New API to set process CPU affinity in POSIX context; compatibility with Linux. * pthread_setaffinity.c: Likewise. * implement.h (pthread_t_): Added cpuset element. - * sched.h: Added new prototypes. + * sched.h: Added new prototypes. * sched.h (cpu_set_t): Support for new process and thread affinity API. 
* pthread.h: Added new prototypes. @@ -580,7 +580,7 @@ * implement.h (__ptw32_spinlock_check_need_init): added missing forward declaration. - + 2012-07-19 Daniel Richard. G * common.mk: New; macros common to all build environment makefiles. @@ -714,7 +714,7 @@ mandatory for implementations that don't support PROCESS_SHARED mutexes, nevertheless it was considered useful both functionally and for source-level compatibility. - + 2011-03-26 Ross Johnson * pthread_getunique_np.c: New non-POSIX interface for compatibility @@ -878,7 +878,7 @@ observed when there are greater than barrier-count threads attempting to cross. * pthread_barrier_destroy: Added an MCS guard lock. - + 2009-03-03 Stephan O'Farrill * pthread_attr_getschedpolicy.c: Add "const" to function parameter @@ -923,7 +923,7 @@ * ptw32_semwait.c: Add check for invalid sem_t after acquiring the sem_t state guard mutex and before affecting changes to sema state. - + 2007-01-06 Marcel Ruff * error.c: Fix reference to pthread handle exitStatus member for @@ -1144,7 +1144,7 @@ 2005-04-25 Ross Johnson - * ptw32_relmillisecs.c: New module; converts future abstime to + * ptw32_relmillisecs.c: New module; converts future abstime to milliseconds relative to 'now'. * pthread_mutex_timedlock.c: Use new __ptw32_relmillisecs routine in place of internal code; remove the NEED_SEM code - this routine is now @@ -1246,7 +1246,7 @@ in speed. So, in the final design with cancelability, an uncontested once_control operation involves a minimum of five interlocked operations (including the LeaveCS operation). - + ALTERNATIVES: An alternative design from Alexander Terekhov proposed using a named mutex, as sketched below:- @@ -1259,13 +1259,13 @@ } once_control = true; } - + A more detailed description of this can be found here:- http://groups.yahoo.com/group/boost/message/15442 [Although the definition of a suitable PTHREAD_ONCE_INIT precludes use of the TLS located flag, this is not critical.] 
- + There are three primary concerns though:- 1) The [named] mutex is 'created' even in the uncontended case. 2) A system wide unique name must be generated. @@ -1411,7 +1411,7 @@ 2004-10-19 Ross Johnson * sem_init.c (sem_init): New semaphore model based on the same idea - as mutexes, i.e. user space interlocked check to avoid + as mutexes, i.e. user space interlocked check to avoid unnecessarily entering kernel space. Wraps the Win32 semaphore and keeps it's own counter. Although the motivation to do this has existed for a long time, credit goes to Alexander Terekhov for providing @@ -1443,7 +1443,7 @@ * pthread_mutex_trylock.c: Likewise. * pthread_mutex_timedlock.c: Likewise. * pthread_mutex_unlock.c: Set the event. - + 2004-10-14 Ross Johnson * pthread_mutex_lock.c (pthread_mutex_lock): New algorithm using @@ -1600,7 +1600,7 @@ * pthread.h (PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP): Likewise. * pthread.h (PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP): Likewise. - * ptw32_mutex_check_need_init.c (__ptw32_mutex_check_need_init): + * ptw32_mutex_check_need_init.c (__ptw32_mutex_check_need_init): Add new initialisers. * pthread_mutex_lock.c (pthread_mutex_lock): Check for new @@ -1682,7 +1682,7 @@ * ptw32_getprocessors.c: Some Win32 environments don't have GetProcessAffinityMask(), so always return CPU count = 1 for them. * config.h (NEED_PROCESSOR_AFFINITY_MASK): Define for WinCE. 
- + 2003-10-15 Ross Johnson * Re-indented all .c files using default GNU style to remove assorted @@ -1882,7 +1882,7 @@ 2002-09-20 Michael Johnson - * pthread_cond_destroy.c (pthread_cond_destroy): + * pthread_cond_destroy.c (pthread_cond_destroy): When two different threads exist, and one is attempting to destroy a condition variable while the other is attempting to initialize a condition variable that was created with @@ -1900,7 +1900,7 @@ 2002-07-30 Alexander Terekhov - * pthread_cond_wait.c (__ptw32_cond_wait_cleanup): + * pthread_cond_wait.c (__ptw32_cond_wait_cleanup): Remove code designed to avoid/prevent spurious wakeup problems. It is believed that the sem_timedwait() call is consuming a CV signal that it shouldn't and this is @@ -1967,7 +1967,7 @@ * pthread_rwlock_timedrdlock.c: New - untested. * pthread_rwlock_timedwrlock.c: New - untested. - + * Testsuite passed (except known MSVC++ problems) * pthread_cond_destroy.c: Expand the time change @@ -2081,7 +2081,7 @@ * pthread_mutex_timedlock.c: Likewise. * pthread_mutex_trylock.c: Likewise. * pthread_mutex_unlock.c: Likewise. - + * private.c: Split file into function segments. * ptw32_InterlockedCompareExchange.c: Separated routine from private.c * ptw32_callUserDestroyRoutines.c: Likewise. @@ -2138,7 +2138,7 @@ * ptw32_calloc.c: Likewise. * ptw32_new.c: Likewise. * w32_CancelableWait.c: Likewise. - + 2002-02-09 Ross Johnson * nonportable.c (pthread_delay_np): Make a true @@ -2212,7 +2212,7 @@ it must be possible to NOT include the header and related definitions with some combination of symbol definitions. Secondly, it should be possible - that additional definitions should be limited to POSIX + that additional definitions should be limited to POSIX compliant symbols by the definition of appropriate symbols. * pthread.h: POSIX conditionals. 
@@ -2253,7 +2253,7 @@ 2002-02-04 Ross Johnson The following extends the idea above to the rest of pthreads-win32 - rpj - + * attr.c: All routines are now in separate compilation units; This file is used to congregate the separate modules for potential inline optimisation and backward build compatibility. @@ -2394,7 +2394,7 @@ Solaris pthreads implementation. * implement.h (pthread_mutex_t_): Remove critical section element - no longer needed. - + 2002-01-04 Ross Johnson @@ -2435,7 +2435,7 @@ implementation of InterlockedCompareExchange. * Makefile (TEST_ICE): Likewise. * private.c (TEST_ICE): Likewise. - + 2001-10-24 Ross Johnson * attr.c (pthread_attr_setstacksize): Quell warning @@ -2626,7 +2626,7 @@ * semaphore.c: Added sem_post_multiple; this is a useful routine, but it doesn't appear to be standard. For now it's not an exported function. - + 2001-06-25 Ross Johnson * create.c (pthread_create): Add priority inheritance @@ -2742,9 +2742,9 @@ 2001-06-06 Ross Johnson - * mutex.c (pthread_mutexattr_init): Remove + * mutex.c (pthread_mutexattr_init): Remove __ptw32_mutex_default_kind. - + 2001-06-05 Ross Johnson * nonportable.c (pthread_mutex_setdefaultkind_np): @@ -2794,14 +2794,14 @@ * GNUmakefile (OPT): Leave symbolic information out of the library and increase optimisation level - for smaller faster prebuilt dlls. - + 2001-05-29 Milan Gardian * Makefile: fix typo. * pthreads.h: Fix problems with stdcall/cdecl conventions, in particular remove the need for PT_STDCALL everywhere; remove warning supression. * (errno): Fix the longstanding "inconsistent dll linkage" problem - with errno; now also works with /MD debugging libs - + with errno; now also works with /MD debugging libs - warnings emerged when compiling pthreads library with /MD (or /MDd) compiler switch, instead of /MT (or /MTd) (i.e. when compiling pthreads using Multithreaded DLL CRT instead of Multithreaded statically linked @@ -2945,13 +2945,13 @@ Mingw32 built library. 
2000-10-10 Steven Reddie - + * misc.c (pthread_self): Restore Win32 "last error" cleared by TlsGetValue() call in pthread_getspecific() - + 2000-09-20 Arthur Kantor - + * mutex.c (pthread_mutex_lock): Record the owner of the mutex. This requires also keeping count of recursive locks ourselves rather than leaving it @@ -3075,7 +3075,7 @@ * pthread.h (__PTW32_BUILD): Only redefine __except and catch compiler keywords if we aren't building - the library (ie. __PTW32_BUILD is not defined) - + the library (ie. __PTW32_BUILD is not defined) - this is safer than defining and then undefining if not building the library. * implement.h: Remove __except and catch undefines. @@ -3250,7 +3250,7 @@ * private.c (__ptw32_threadStart): Initialise ei[]. (__ptw32_threadStart): When beginthread is used to start the - thread, force waiting until the creator thread had the + thread, force waiting until the creator thread had the thread handle. * cancel.c (__ptw32_cancel_thread): Include context switch @@ -3292,7 +3292,7 @@ Making this mutex a static could reduce the number of mutexes used by an application since it is actually created only at first use and it's often destroyed soon after. - + 2000-07-22 Ross Johnson * FAQ: Added Q5 and Q6. @@ -3388,7 +3388,7 @@ 1999-11-21 Ross Johnson - * global.c (__ptw32_exception_services): Declare new variable. + * global.c (__ptw32_exception_services): Declare new variable. * private.c (__ptw32_threadStart): Destroy thread's cancelLock mutex; make 'catch' and '__except' usageimmune to @@ -3462,7 +3462,7 @@ * pthread.h (winsock.h): Include unconditionally. (ETIMEDOUT): Change fallback value to that defined by winsock.h. - + * general: Patched for portability to WinCE. The details are described in the file WinCE-PORT. Follow the instructions in README.WinCE to make the appropriate changes in config.h. 
@@ -3594,14 +3594,14 @@ Tue Aug 17 20:00:08 1999 Mumit Khan * exit.c (pthread_exit): Don't call pthread_self() but get thread handle directly from TSD for efficiency. - + 1999-08-12 Ross Johnson * private.c (__ptw32_threadStart): ei[] only declared if _MSC_VER. * exit.c (pthread_exit): Check for implicitly created threads to avoid raising an unhandled exception. - + 1999-07-12 Peter Slacik * condvar.c (pthread_cond_destroy): Add critical section. @@ -3640,7 +3640,7 @@ Sun May 30 00:25:02 1999 Ross Johnson Fri May 28 13:33:05 1999 Mark E. Armstrong * condvar.c (pthread_cond_broadcast): Fix possible memory fault - + Thu May 27 13:08:46 1999 Peter Slacik * condvar.c (pthread_cond_broadcast): Fix logic bug @@ -3670,7 +3670,7 @@ Thu Apr 8 01:16:23 1999 Ross Johnson (sem_getvalue): ditto. * semaphore.h (_POSIX_SEMAPHORES): define. - + Wed Apr 7 14:09:52 1999 Ross Johnson * errno.c (_REENTRANT || _MT): Invert condition. @@ -3715,7 +3715,7 @@ Fri Apr 2 11:08:50 1999 Ross Johnson * semaphore.c (__ptw32_sem_timedwait): Moved to private.c. - * pthread.h (__ptw32_sem_t): Change to sem_t. + * pthread.h (__ptw32_sem_t): Change to sem_t. * private.c (__ptw32_sem_timedwait): Moved from semaphore.c; set errno on error. @@ -3758,7 +3758,7 @@ Fri Mar 19 09:12:59 1999 Ross Johnson Tue Mar 16 1999 Ross Johnson * all: Add GNU LGPL and Copyright and Warranty. - + Mon Mar 15 00:20:13 1999 Ross Johnson * condvar.c (pthread_cond_init): fix possible uninitialised use @@ -3818,7 +3818,7 @@ Sun Mar 7 12:31:14 1999 Ross Johnson * implement.h (__ptw32_cond_test_init_lock): Add extern. - * global.c (__ptw32_cond_test_init_lock): Add declaration. + * global.c (__ptw32_cond_test_init_lock): Add declaration. * condvar.c (pthread_cond_destroy): check for valid initialised CV; flag destroyed CVs as invalid. @@ -3924,7 +3924,7 @@ Wed Feb 3 13:04:44 1999 Ross Johnson * cleanup.c: Rename __ptw32_*_cleanup() to pthread_*_cleanup(). * pthread.def: Ditto. - + * pthread.h: Ditto. 
* pthread.def (pthread_cleanup_push): Remove from export list; @@ -3998,7 +3998,7 @@ Fri Jan 29 11:56:28 1999 Ross Johnson Sun Jan 24 01:34:52 1999 Ross Johnson - * semaphore.c (sem_wait): Remove second arg to + * semaphore.c (sem_wait): Remove second arg to pthreadCancelableWait() call. Sat Jan 23 17:36:40 1999 Ross Johnson @@ -4072,7 +4072,7 @@ Tue Jan 19 18:27:42 1999 Ross Johnson * pthread.h (pthreadCancelableTimedWait): New prototype. (pthreadCancelableWait): Remove second argument. - * misc.c (CancelableWait): New static function is + * misc.c (CancelableWait): New static function is pthreadCancelableWait() renamed. (pthreadCancelableWait): Now just calls CancelableWait() with INFINITE timeout. @@ -4083,18 +4083,18 @@ Tue Jan 19 18:27:42 1999 Scott Lightner * private.c (__ptw32_sem_timedwait): 'abstime' arg really is absolute time. Calculate relative time to wait from current - time before passing timeout to new routine + time before passing timeout to new routine pthreadCancelableTimedWait(). Tue Jan 19 10:27:39 1999 Ross Johnson * pthread.h (pthread_mutexattr_setforcecs_np): New prototype. - + * mutex.c (pthread_mutexattr_init): Init 'pshared' and 'forcecs' attributes to 0. (pthread_mutexattr_setforcecs_np): New function (not portable). - * pthread.h (pthread_mutex_t): + * pthread.h (pthread_mutex_t): Add 'mutex' element. Set to NULL in PTHREAD_MUTEX_INITIALIZER. The pthread_mutex_*() routines will try to optimise performance by choosing either mutexes or critical sections as the basis @@ -4102,7 +4102,7 @@ Tue Jan 19 10:27:39 1999 Ross Johnson (pthread_mutexattr_t_): Add 'forcecs' element. Some applications may choose to force use of critical sections if they know that:- - the mutex is PROCESS_PRIVATE and, + the mutex is PROCESS_PRIVATE and, either the OS supports TryEnterCriticalSection() or pthread_mutex_trylock() will never be called on the mutex. This attribute will be setable via a non-portable routine. 
@@ -4131,11 +4131,11 @@ Sun Jan 17 12:01:26 1999 Ross Johnson * mutex.c (_mutex_check_need_init): New static function to test and init PTHREAD_MUTEX_INITIALIZER mutexes. Provides serialised - access to the internal state of the uninitialised static mutex. + access to the internal state of the uninitialised static mutex. Called from pthread_mutex_trylock() and pthread_mutex_lock() which do a quick unguarded test to check if _mutex_check_need_init() needs to be called. This is safe as the test is conservative - and is repeated inside the guarded section of + and is repeated inside the guarded section of _mutex_check_need_init(). Thus in all calls except the first calls to lock static mutexes, the additional overhead to lock any mutex is a single memory fetch and test for zero. @@ -4249,7 +4249,7 @@ Mon Jan 11 20:33:19 1999 Ross Johnson * pthread.h: Re-arrange conditional compile of pthread_cleanup-* macros. - * cleanup.c (__ptw32_push_cleanup): Provide conditional + * cleanup.c (__ptw32_push_cleanup): Provide conditional compile of cleanup->prev. 1999-01-11 Tor Lillqvist @@ -4291,13 +4291,13 @@ Tue Dec 29 13:11:16 1998 Ross Johnson pthread_mutexattr_t_, pthread_key_t_, pthread_cond_t_, pthread_condattr_t_, pthread_once_t_. - * pthread.h: Add "_" prefix to pthread_push_cleanup and + * pthread.h: Add "_" prefix to pthread_push_cleanup and pthread_pop_cleanup internal routines, and associated struct and typedefs. * buildlib.bat: Add compile command for semaphore.c - * pthread.def: Comment out pthread_atfork routine name. + * pthread.def: Comment out pthread_atfork routine name. Now unimplemented. * tsd.c (pthread_setspecific): Rename tkAssocCreate to @@ -4380,7 +4380,7 @@ Sun Dec 20 14:51:58 1998 Ross Johnson (pthread_condattr_getpshared): Replaced by John Bossom's version. (pthread_condattr_setpshared): Replaced by John Bossom's version. (pthread_cond_init): Replaced by John Bossom's version. - Fix comment (refered to mutex rather than condition variable). 
+ Fix comment (referred to mutex rather than condition variable). (pthread_cond_destroy): Replaced by John Bossom's version. (pthread_cond_wait): Replaced by John Bossom's version. (pthread_cond_timedwait): Replaced by John Bossom's version. @@ -4400,7 +4400,7 @@ Mon Dec 7 09:44:40 1998 John Bossom (pthread_setcanceltype): Replaced. (pthread_testcancel): Replaced. (pthread_cancel): Replaced. - + * exit.c (pthread_exit): Replaced. * misc.c (pthread_self): Replaced. @@ -4648,9 +4648,9 @@ Mon Oct 5 14:25:08 1998 Ross Johnson macro. Passes. * tests/create1.c: New file; test pthread_create(). Passes. - + * tests/equal.c: Poor test; remove. - + * tests/equal1.c New file; test pthread_equal(). Passes. * tests/once1.c: New file; test for pthread_once(). Passes. @@ -4664,7 +4664,7 @@ Mon Oct 5 14:25:08 1998 Ross Johnson * tests/self3.c: New file. Test pthread_self() with a couple of threads to ensure their thread IDs differ. Passes. - + 1998-10-04 Ben Elliston * tests/mutex2.c: Test pthread_mutex_trylock(). Passes. @@ -4692,14 +4692,14 @@ Mon Oct 5 14:25:08 1998 Ross Johnson * config.h.in: Regenerate. * create.c (__ptw32_start_call): Add STDCALL prefix. - + * mutex.c (pthread_mutex_init): Correct function signature. * attr.c (pthread_attr_init): Only zero out the `sigmask' member if we have the sigset_t type. * pthread.h: No need to include . It doesn't even exist - on Win32! Again, an artifact of cross-compilation. + on Win32! Again, an artifact of cross-compilation. (pthread_sigmask): Only provide if we have the sigset_t type. * process.h: Remove. This was a stand-in before we started doing @@ -4717,7 +4717,7 @@ Mon Oct 5 14:25:08 1998 Ross Johnson * configure.in: Test for required system features. - * configure: Generate. + * configure: Generate. * acconfig.h: New file. @@ -4733,7 +4733,7 @@ Mon Oct 5 14:25:08 1998 Ross Johnson * install-sh: Likewise. - * config.h: Remove. + * config.h: Remove. * Makefile: Likewise. 
@@ -4746,7 +4746,7 @@ Mon Oct 5 14:25:08 1998 Ross Johnson Sat Sep 12 20:09:24 1998 Ross Johnson * windows.h: Remove error number definitions. These are in - + * tsd.c: Add comment explaining rationale for not building POSIX TSD on top of Win32 TLS. @@ -4756,10 +4756,10 @@ Sat Sep 12 20:09:24 1998 Ross Johnson * signal.c (pthread_sigmask): Only provide if HAVE_SIGSET_T is defined. - + * config.h: #undef features, don't #define them. This will be generated by autoconf very soon. - + 1998-08-11 Ben Elliston * Makefile (LIB): Define. @@ -4770,7 +4770,7 @@ Sat Sep 12 20:09:24 1998 Ross Johnson already have one. * windows.c (TlsGetValue): Bug fix. - + Thu Aug 6 15:19:22 1998 Ross Johnson * misc.c (pthread_once): Fix arg 1 of EnterCriticalSection() @@ -4779,7 +4779,7 @@ Thu Aug 6 15:19:22 1998 Ross Johnson * fork.c (pthread_atfork): Typecast (void (*)(void *)) funcptr in each __ptw32_handler_push() call. - * exit.c (__ptw32_exit): Fix attr arg in + * exit.c (__ptw32_exit): Fix attr arg in pthread_attr_getdetachstate() call. * private.c (__ptw32_new_thread): Typecast (HANDLE) NULL. @@ -4789,13 +4789,13 @@ Thu Aug 6 15:19:22 1998 Ross Johnson changing in an attempt to make thread administration data types opaque and cleanup DLL startup. - * dll.c (PthreadsEntryPoint): + * dll.c (PthreadsEntryPoint): (__ptw32_virgins): Remove malloc() and free() calls. (__ptw32_reuse): Ditto. (__ptw32_win32handle_map): Ditto. (__ptw32_threads_mutex_table): Ditto. - * global.c (_POSIX_THREAD_THREADS_MAX): Initialise with + * global.c (_POSIX_THREAD_THREADS_MAX): Initialise with PTW32_MAX_THREADS. (__ptw32_virgins): Ditto. (__ptw32_reuse): Ditto. @@ -4808,9 +4808,9 @@ Thu Aug 6 15:19:22 1998 Ross Johnson * condvar.c (pthread_cond_init): Add address-of operator & to arg 1 of pthread_mutex_init() call. (pthread_cond_destroy): Add address-of operator & to - arg 1 of pthread_mutex_destroy() call. + arg 1 of pthread_mutex_destroy() call. 
- * cleanup.c (__ptw32_destructor_pop_all): Add (int) cast to + * cleanup.c (__ptw32_destructor_pop_all): Add (int) cast to pthread_getspecific() arg. (__ptw32_destructor_pop): Add (void *) cast to "if" conditional. (__ptw32_destructor_push): Add (void *) cast to @@ -4849,13 +4849,13 @@ Tue Aug 4 16:57:58 1998 Ross Johnson * private.c (__ptw32_delete_thread): Fix typo. Add missing ';'. - * global.c (__ptw32_virgins): Change types from pointer to + * global.c (__ptw32_virgins): Change types from pointer to array pointer. (__ptw32_reuse): Ditto. (__ptw32_win32handle_map): Ditto. (__ptw32_threads_mutex_table): Ditto. - * implement.h(__ptw32_virgins): Change types from pointer to + * implement.h(__ptw32_virgins): Change types from pointer to array pointer. (__ptw32_reuse): Ditto. (__ptw32_win32handle_map): Ditto. @@ -4925,7 +4925,7 @@ Mon Aug 3 21:19:57 1998 Ross Johnson * windows.h (THREAD_PRIORITY_NORMAL): Add. - * pthread.h (sched_param): Add missing ';' to struct definition. + * pthread.h (sched_param): Add missing ';' to struct definition. * attr.c (pthread_attr_init): Remove obsolete pthread_attr_t member initialisation - cancelstate, canceltype, cancel_pending. @@ -4938,7 +4938,7 @@ Mon Aug 3 21:19:57 1998 Ross Johnson 1998-08-02 Ben Elliston - * windows.h: Remove duplicate TlsSetValue() prototype. Add + * windows.h: Remove duplicate TlsSetValue() prototype. Add TlsGetValue() prototype. (FALSE): Define. (TRUE): Likewise. @@ -5115,7 +5115,7 @@ Mon Jul 27 00:20:37 1998 Ross Johnson * exit.c (__ptw32_exit): Fix incorrect check for detachedstate. - * implement.h (__ptw32_call_t): Remove env member. + * implement.h (__ptw32_call_t): Remove env member. Sun Jul 26 13:06:12 1998 Ross Johnson @@ -5229,7 +5229,7 @@ Sat Jul 25 00:00:13 1998 Ross Johnson * create.c (__ptw32_start_call): Set thread priority. Ensure our thread entry is removed from the thread table but only if pthread_detach() was called and there are no waiting joins. 
- (pthread_create): Set detach flag in thread entry if the + (pthread_create): Set detach flag in thread entry if the thread is created PTHREAD_CREATE_DETACHED. * pthread.h (pthread_attr_t): Rename member "detachedstate". @@ -5307,7 +5307,7 @@ Fri Jul 24 21:13:55 1998 Ross Johnson (pthread_create): New threads inherit their creator's signal mask. Copy the signal mask to the new thread structure if we know about signals. - + Fri Jul 24 16:33:17 1998 Ross Johnson * fork.c (pthread_atfork): Add all the necessary push calls. @@ -5351,12 +5351,12 @@ Fri Jul 24 03:00:25 1998 Ross Johnson (pthread_create): _beginthreadex() now passes a pointer to our thread table entry instead of just the call member of that entry. - * implement.h (__ptw32_threads_thread): New member + * implement.h (__ptw32_threads_thread): New member void ** joinvalueptr. (__ptw32_call_t): New member jmpbuf env. * exit.c (pthread_exit): Major rewrite to handle joins and handing - value pointer to joining thread. Uses longjmp() back to + value pointer to joining thread. Uses longjmp() back to __ptw32_start_call(). * create.c (pthread_create): Ensure values of new attribute members @@ -5426,7 +5426,7 @@ Fri Jul 24 00:21:21 1998 Ross Johnson (SCHED_MAX): Likewise, the maximum possible value. (PTHREAD_CANCEL_ASYNCHRONOUS): Redefine. (PTHREAD_CANCEL_DEFERRED): Likewise. - + * sched.c: New file. (pthread_setschedparam): Implement. (pthread_getschedparam): Implement. @@ -5492,7 +5492,7 @@ Wed Jul 22 00:16:22 1998 Ross Johnson all new threads. It allows us to do some cleanup when the thread returns, ie. that is otherwise only done if the thread is cancelled. - * exit.c (__ptw32_vacuum): New function contains code from + * exit.c (__ptw32_vacuum): New function contains code from pthread_exit() that we need in the new __ptw32_start_call() as well. @@ -5563,7 +5563,7 @@ Wed Jul 22 00:16:22 1998 Ross Johnson up to multiple of DWORD. Add function prototypes. 
- * private.c (__ptw32_getthreadindex): "*thread" should have been + * private.c (__ptw32_getthreadindex): "*thread" should have been "thread". Detect empty slot fail condition. 1998-07-20 Ben Elliston @@ -5571,13 +5571,13 @@ Wed Jul 22 00:16:22 1998 Ross Johnson * misc.c (pthread_once): Implement. Don't use a per-application flag and mutex--make `pthread_once_t' contain these elements in their structure. The earlier version had incorrect semantics. - + * pthread.h (__ptw32_once_flag): Add new variable. Remove. (__ptw32_once_lock): Add new mutex lock to ensure integrity of access to __ptw32_once_flag. Remove. (pthread_once): Add function prototype. (pthread_once_t): Define this type. - + Mon Jul 20 02:31:05 1998 Ross Johnson * private.c (__ptw32_getthreadindex): Implement. @@ -5594,7 +5594,7 @@ Mon Jul 20 02:31:05 1998 Ross Johnson * create.c (pthread_create): Add thread to thread table. Keep a thread-private copy of the attributes with default values - filled in when necessary. Same for the cleanup stack. Make + filled in when necessary. Same for the cleanup stack. Make pthread_create C run-time library friendly by using _beginthreadex() instead of CreateThread(). Fix error returns. @@ -5661,9 +5661,9 @@ Sun Jul 19 16:26:23 1998 Ross Johnson across processes for now. (pthread_mutex_t): Use a Win32 CRITICAL_SECTION type for better performance. - + * implement.h (__ptw32_mutexattr_t): Remove shared attribute. - + * mutex.c (pthread_mutexattr_setpshared): This optional function is no longer supported, since we want to implement POSIX mutex variables using the much more efficient Win32 critical section @@ -5688,7 +5688,7 @@ Sun Jul 19 16:26:23 1998 Ross Johnson (pthread_attr_getstackaddr): Likewise. (pthread_attr_init): Likewise. (pthread_attr_destroy): Likewise. - + * condvar.c (pthread_condattr_init): Add `_cond' to function name. * mutex.c (pthread_mutex_lock): Add `_mutex' to function name. 
@@ -5702,7 +5702,7 @@ Sun Jul 19 16:26:23 1998 Ross Johnson (pthread_attr_getstacksize): Likewise. (pthread_attr_setstackaddr): Likewise. (pthread_attr_getstackaddr): Likewise. - + Mon Jul 13 01:09:55 1998 Ross Johnson * implement.h: Wrap in #ifndef _IMPLEMENT_H @@ -5717,7 +5717,7 @@ Mon Jul 13 01:09:55 1998 Ross Johnson (pthread_condattr_destroy): Likewise. (pthread_condattr_setpshared): Likewise. (pthread_condattr_getpshared): Likewise. - + * implement.h (PTHREAD_THREADS_MAX): Remove trailing semicolon. (PTHREAD_STACK_MIN): Specify; needs confirming. (__ptw32_attr_t): Define this type. @@ -5769,7 +5769,7 @@ Mon Jul 13 01:09:55 1998 Ross Johnson (insert_attr): New function; very preliminary implementation! (is_attr): Likewise. (remove_attr): Likewise. - + Sat Jul 11 14:48:54 1998 Ross Johnson * implement.h: Preliminary implementation specific defines. @@ -5781,10 +5781,10 @@ Sat Jul 11 14:48:54 1998 Ross Johnson * sync.c (pthread_join): Implement. * misc.c (pthread_equal): Likewise. - + * pthread.h (pthread_join): Add function prototype. (pthread_equal): Likewise. - + 1998-07-10 Ben Elliston * misc.c (pthread_self): Implement. diff --git a/doc/tutorial/tracking/tutorial-tracking-mb-generic-json.dox b/doc/tutorial/tracking/tutorial-tracking-mb-generic-json.dox index 1c132a25db..2ef9352ead 100644 --- a/doc/tutorial/tracking/tutorial-tracking-mb-generic-json.dox +++ b/doc/tutorial/tracking/tutorial-tracking-mb-generic-json.dox @@ -116,7 +116,7 @@ stating that this tracker uses both edge (see the vpMe class) and KLT (see vpKlt The next important definition is: \snippet realsense-color-and-depth.json.example Transformation Describing the transformation between this camera and the reference camera. It can also be given as a vpPoseVector JSON representation. -If the current camera is the reference, then "camTref" may be ommitted or set as the identity transformation. 
+If the current camera is the reference, then "camTref" may be omitted or set as the identity transformation. Next, we must define the camera intrinsics (see vpCameraParameters): \snippet realsense-color-and-depth.json.example Camera diff --git a/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox b/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox index a34dd17df7..fc67ffae45 100644 --- a/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox +++ b/doc/tutorial/visual-servo/tutorial-pixhawk-vs.dox @@ -272,7 +272,7 @@ In order to do this part, make sure you add a camera to your drone. We added a i Jetson through USB. The code servoPixhawkDroneIBVS.cpp is an example that needs to be run on the Jetson and that allows to do visual servoing with the drone. -This program establishes a rigid link between the drone (equiped with a camera) and an Apriltag. +This program establishes a rigid link between the drone (equipped with a camera) and an Apriltag. Depending on where the camera is placed, the matrices expressing the transformation between the FLU body frame of the drone and the camera frame need to be modified. Here is a picture of the drone showing where the D405 camera was attached. diff --git a/example/math/BSpline.cpp b/example/math/BSpline.cpp index 0a3d2caf36..5056dfded9 100644 --- a/example/math/BSpline.cpp +++ b/example/math/BSpline.cpp @@ -29,7 +29,7 @@ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Description: - * Exemple of a B-Spline curve. + * Example of a B-Spline curve. * *****************************************************************************/ /*! 
diff --git a/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp b/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp index 4a35a880f5..0b9729103c 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp @@ -120,7 +120,7 @@ int main() try { // Define the square CAD model -// Square dimention +// Square dimension #define L 0.075 // Distance between the camera and the square at the desired // position after visual servoing convergence @@ -154,7 +154,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp index 3904882893..5cdc8de3cf 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp @@ -203,7 +203,7 @@ int main() std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; std::cout << " Interaction matrix computed with the current features " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git 
a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp index f9fd160600..5d715572b3 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp @@ -153,7 +153,7 @@ int main() std::cout << " Use of the Afma6 robot " << std::endl; std::cout << " Interaction matrix computed with the desired features " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_cur.cpp b/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_cur.cpp index dac1df8176..43fd7e2edb 100644 --- a/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_cur.cpp +++ b/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_cur.cpp @@ -188,7 +188,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_des.cpp b/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_des.cpp index 0e2ac786db..456295e6ee 100644 --- a/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_des.cpp +++ 
b/example/servo-viper850/servoViper850FourPoints2DArtVelocityLs_des.cpp @@ -108,7 +108,7 @@ int main() try { // Define the square CAD model -// Square dimention +// Square dimension // #define L 0.075 #define L 0.05 // Distance between the camera and the square at the desired @@ -150,7 +150,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-viper850/servoViper850FourPoints2DCamVelocityLs_cur.cpp b/example/servo-viper850/servoViper850FourPoints2DCamVelocityLs_cur.cpp index 3b7f4588b7..896985320a 100644 --- a/example/servo-viper850/servoViper850FourPoints2DCamVelocityLs_cur.cpp +++ b/example/servo-viper850/servoViper850FourPoints2DCamVelocityLs_cur.cpp @@ -189,7 +189,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera space" << std::endl; std::cout << " Use of the Viper850 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/example/servo-viper850/servoViper850FourPointsKinect.cpp b/example/servo-viper850/servoViper850FourPointsKinect.cpp index ba8a431c35..2e4f567e3d 100644 --- a/example/servo-viper850/servoViper850FourPointsKinect.cpp +++ 
b/example/servo-viper850/servoViper850FourPointsKinect.cpp @@ -200,7 +200,7 @@ int main() std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera space" << std::endl; std::cout << " Use of the Viper850 robot " << std::endl; - std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << std::endl; diff --git a/modules/ar/CMakeLists.txt b/modules/ar/CMakeLists.txt index d5a5394cbb..004b702c55 100644 --- a/modules/ar/CMakeLists.txt +++ b/modules/ar/CMakeLists.txt @@ -173,7 +173,7 @@ if(USE_COIN3D) endif() endif() -vp_add_module(ar visp_core OPTIONAL visp_io) +vp_add_module(ar visp_core) vp_glob_module_sources() if(USE_OGRE) diff --git a/modules/core/include/visp3/core/vpCameraParameters.h b/modules/core/include/visp3/core/vpCameraParameters.h index 06b36d5e25..ada6c7074d 100644 --- a/modules/core/include/visp3/core/vpCameraParameters.h +++ b/modules/core/include/visp3/core/vpCameraParameters.h @@ -341,7 +341,7 @@ class VISP_EXPORT vpCameraParameters \return True if the fov has been computed, False otherwise. 
*/ - inline bool isFovComputed() const { return isFov; } + inline bool isFovComputed() const { return m_isFov; } void computeFov(const unsigned int &w, const unsigned int &h); @@ -354,7 +354,7 @@ class VISP_EXPORT vpCameraParameters */ inline double getHorizontalFovAngle() const { - if (!isFov) { + if (!m_isFov) { vpTRACE("Warning: The FOV is not computed, getHorizontalFovAngle() " "won't be significant."); } @@ -370,7 +370,7 @@ class VISP_EXPORT vpCameraParameters */ inline double getVerticalFovAngle() const { - if (!isFov) { + if (!m_isFov) { vpTRACE("Warning: The FOV is not computed, getVerticalFovAngle() won't " "be significant."); } @@ -391,24 +391,24 @@ class VISP_EXPORT vpCameraParameters */ inline std::vector getFovNormals() const { - if (!isFov) { + if (!m_isFov) { vpTRACE("Warning: The FOV is not computed, getFovNormals() won't be " "significant."); } - return fovNormals; + return m_fovNormals; } - inline double get_px() const { return px; } - inline double get_px_inverse() const { return inv_px; } - inline double get_py_inverse() const { return inv_py; } - inline double get_py() const { return py; } - inline double get_u0() const { return u0; } - inline double get_v0() const { return v0; } - inline double get_kud() const { return kud; } - inline double get_kdu() const { return kdu; } + inline double get_px() const { return m_px; } + inline double get_px_inverse() const { return m_inv_px; } + inline double get_py_inverse() const { return m_inv_py; } + inline double get_py() const { return m_py; } + inline double get_u0() const { return m_u0; } + inline double get_v0() const { return m_v0; } + inline double get_kud() const { return m_kud; } + inline double get_kdu() const { return m_kdu; } inline std::vector getKannalaBrandtDistortionCoefficients() const { return m_dist_coefs; } - inline vpCameraParametersProjType get_projModel() const { return projModel; } + inline vpCameraParametersProjType get_projModel() const { return m_projModel; } vpMatrix get_K() 
const; vpMatrix get_K_inverse() const; @@ -425,22 +425,22 @@ class VISP_EXPORT vpCameraParameters static const double DEFAULT_KDU_PARAMETER; static const vpCameraParametersProjType DEFAULT_PROJ_TYPE; - double px, py; //!< Pixel size - double u0, v0; //!< Principal point - double kud; //!< Radial distortion (from undistorted to distorted) - double kdu; //!< Radial distortion (from distorted to undistorted) - std::vector m_dist_coefs; //!< Coefficients for Kannala-Brandt distortion model + double m_px, m_py; //!< Pixel size + double m_u0, m_v0; //!< Principal point + double m_kud; //!< Radial distortion (from undistorted to distorted) + double m_kdu; //!< Radial distortion (from distorted to undistorted) + std::vector m_dist_coefs; //!< Coefficients for Kannala-Brandt distortion model - unsigned int width; //!< Width of the image used for the fov computation - unsigned int height; //!< Height of the image used for the fov computation - bool isFov; //!< Boolean to specify if the fov has been computed - double m_hFovAngle; //!< Field of view horizontal angle - double m_vFovAngle; //!< Field of view vertical angle - std::vector fovNormals; //!< Normals of the planes describing the fov + unsigned int m_width; //!< Width of the image used for the fov computation + unsigned int m_height; //!< Height of the image used for the fov computation + bool m_isFov; //!< Boolean to specify if the fov has been computed + double m_hFovAngle; //!< Field of view horizontal angle + double m_vFovAngle; //!< Field of view vertical angle + std::vector m_fovNormals; //!< Normals of the planes describing the fov - double inv_px, inv_py; + double m_inv_px, m_inv_py; - vpCameraParametersProjType projModel; //!< used projection model + vpCameraParametersProjType m_projModel; //!< used projection model #ifdef VISP_HAVE_NLOHMANN_JSON friend void to_json(nlohmann::json &j, const vpCameraParameters &cam); friend void from_json(const nlohmann::json &j, vpCameraParameters &cam); @@ -454,26 +454,26 @@ 
NLOHMANN_JSON_SERIALIZE_ENUM(vpCameraParameters::vpCameraParametersProjType, { {vpCameraParameters::perspectiveProjWithDistortion, "perspectiveWithDistortion"}, {vpCameraParameters::ProjWithKannalaBrandtDistortion, "kannalaBrandtDistortion"} }); + /** * \brief Converts camera parameters into a JSON representation. - * \sa from_json for more information on the content - * \param j the resulting JSON object - * \param cam the camera to serialize - * + * \sa from_json() for more information on the content. + * \param j The resulting JSON object. + * \param cam The camera to serialize. */ inline void to_json(nlohmann::json &j, const vpCameraParameters &cam) { - j["px"] = cam.px; - j["py"] = cam.py; - j["u0"] = cam.u0; - j["v0"] = cam.v0; - j["model"] = cam.projModel; + j["px"] = cam.m_px; + j["py"] = cam.m_py; + j["u0"] = cam.m_u0; + j["v0"] = cam.m_v0; + j["model"] = cam.m_projModel; - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithDistortion: { - j["kud"] = cam.kud; - j["kdu"] = cam.kdu; + j["kud"] = cam.m_kud; + j["kdu"] = cam.m_kdu; break; } case vpCameraParameters::ProjWithKannalaBrandtDistortion: @@ -487,33 +487,33 @@ inline void to_json(nlohmann::json &j, const vpCameraParameters &cam) break; } } -/*! - \brief Deserialize a JSON object into camera parameters. - The minimal required properties are: - - Pixel size: px, py - - Principal point: u0, v0 - - If a projection model (\ref vpCameraParameters::vpCameraParametersProjType) is supplied, then other parameters may be expected: - - In the case of perspective projection with distortion, ku, and kud must be supplied. - - In the case of Kannala-Brandt distortion, the list of coefficients must be supplied. 
- - An example of a JSON object representing a camera is: - \code{.json} - { - "px": 300.0, - "py": 300.0, - "u0": 120.5, - "v0": 115.0, - "model": "perspectiveWithDistortion", // one of ["perspectiveWithoutDistortion", "perspectiveWithDistortion", "kannalaBrandtDistortion"]. If ommitted, camera is assumed to have no distortion - "kud": 0.5, // required since "model" == perspectiveWithDistortion - "kdu": 0.5 - } - \endcode - - \param j The json object to deserialize. - \param cam The modified camera. -*/ +/*! + * \brief Deserialize a JSON object into camera parameters. + * The minimal required properties are: + * - Pixel size: px, py + * - Principal point: u0, v0 + * + * If a projection model (\ref vpCameraParameters::vpCameraParametersProjType) is supplied, then other parameters may be expected: + * - In the case of perspective projection with distortion, ku, and kud must be supplied. + * - In the case of Kannala-Brandt distortion, the list of coefficients must be supplied. + * + * An example of a JSON object representing a camera is: + * \code{.json} + * { + * "px": 300.0, + * "py": 300.0, + * "u0": 120.5, + * "v0": 115.0, + * "model": "perspectiveWithDistortion", // one of ["perspectiveWithoutDistortion", "perspectiveWithDistortion", "kannalaBrandtDistortion"]. If omitted, camera is assumed to have no distortion + * "kud": 0.5, // required since "model" == perspectiveWithDistortion + * "kdu": 0.5 + * } + * \endcode + * + * \param j The json object to deserialize. + * \param cam The modified camera. + */ inline void from_json(const nlohmann::json &j, vpCameraParameters &cam) { const double px = j.at("px").get(); diff --git a/modules/core/include/visp3/core/vpCircle.h b/modules/core/include/visp3/core/vpCircle.h index 3407516fdc..c491f08985 100644 --- a/modules/core/include/visp3/core/vpCircle.h +++ b/modules/core/include/visp3/core/vpCircle.h @@ -46,63 +46,62 @@ #include /*! 
- \class vpCircle - \ingroup group_core_geometry - \brief Class that defines a 3D circle in the object frame and allows forward projection of a 3D circle in the - camera frame and in the 2D image plane by perspective projection. - All the parameters must be set in meter. - - Note that a 3D circle is defined from the intersection between a 3D plane and a 3D sphere. - - A 3D circle has the followings parameters: - - **in the object frame**: the parameters oA, oB, oC corresponding to the 3D plane with equation - oA*(X-oX)+oB*(Y-oY)+oC*(Z-oZ)=0 where (X,Y,Z) are the coordinates of a 3D point belonging to the plane passing through - the 3D sphere center (oX,oY,oZ) and the 3D coordinates oX, oY, oZ of the center and radius R of the 3D sphere. These - parameters registered in vpForwardProjection::oP internal 7-dim vector are set using the constructors - vpCircle(double oA, double oB, double oC, double oX, double oY, double oZ, double R), - vpCircle(const vpColVector &oP) or the functions - setWorldCoordinates(double oA, double oB, double oC, double oX, double oY, double oZ, double R) - and setWorldCoordinates(const vpColVector &oP). To get theses parameters use get_oP(). - - - **in the camera frame**: the parameters cA, cB, cC corresponding to the 3D plane cAx+cBy+cCz+D=0 - and the coordinates cX, cY, cZ of the center and radius R of the 3D sphere. These - parameters registered in vpTracker::cP internal 7-dim vector are computed using - changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const or changeFrame(const vpHomogeneousMatrix &cMo). - These parameters could be retrieved using getA(), getB(), getC(), getX(), getY(), getZ() and getR(). - To get theses parameters use get_cP(). - - - **in the image plane**: here we consider the parameters of the ellipse corresponding - to the perspective projection of the 3D circle. 
The parameters are the ellipse centroid (x, y) - and n20, n11, n02 which are the second order centered moments of - the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where - \f$\mu_{ij}\f$ are the centered moments and a the area). - These parameters are registered in vpTracker::p internal 5-dim vector - and computed using projection() and - projection(const vpColVector &cP, vpColVector &p) const. They could be retrieved using get_x(), get_y(), get_n20(), - get_n11() and get_n02(). They correspond to 2D normalized circle parameters with values expressed in meters. - To get theses parameters use get_p(). - -*/ + * \class vpCircle + * \ingroup group_core_geometry + * \brief Class that defines a 3D circle in the object frame and allows forward projection of a 3D circle in the + * camera frame and in the 2D image plane by perspective projection. + * All the parameters must be set in meter. + * + * Note that a 3D circle is defined from the intersection between a 3D plane and a 3D sphere. + * + * A 3D circle has the followings parameters: + * - **in the object frame**: the parameters oA, oB, oC corresponding to the 3D plane with equation + * oA*(X-oX)+oB*(Y-oY)+oC*(Z-oZ)=0 where (X,Y,Z) are the coordinates of a 3D point belonging to the plane passing through + * the 3D sphere center (oX,oY,oZ) and the 3D coordinates oX, oY, oZ of the center and radius R of the 3D sphere. These + * parameters registered in vpForwardProjection::oP internal 7-dim vector are set using the constructors + * vpCircle(double oA, double oB, double oC, double oX, double oY, double oZ, double R), + * vpCircle(const vpColVector &oP) or the functions + * setWorldCoordinates(double oA, double oB, double oC, double oX, double oY, double oZ, double R) + * and setWorldCoordinates(const vpColVector &oP). To get theses parameters use get_oP(). 
+ * + * - **in the camera frame**: the parameters cA, cB, cC corresponding to the 3D plane cAx+cBy+cCz+D=0 + * and the coordinates cX, cY, cZ of the center and radius R of the 3D sphere. These + * parameters registered in vpTracker::cP internal 7-dim vector are computed using + * changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const or changeFrame(const vpHomogeneousMatrix &cMo). + * These parameters could be retrieved using getA(), getB(), getC(), getX(), getY(), getZ() and getR(). + * To get theses parameters use get_cP(). + * + * - **in the image plane**: here we consider the parameters of the ellipse corresponding + * to the perspective projection of the 3D circle. The parameters are the ellipse centroid (x, y) + * and n20, n11, n02 which are the second order centered moments of + * the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where + * \f$\mu_{ij}\f$ are the centered moments and a the area). + * These parameters are registered in vpTracker::p internal 5-dim vector + * and computed using projection() and + * projection(const vpColVector &cP, vpColVector &p) const. They could be retrieved using get_x(), get_y(), get_n20(), + * get_n11() and get_n02(). They correspond to 2D normalized circle parameters with values expressed in meters. + * To get theses parameters use get_p(). 
+ */ class VISP_EXPORT vpCircle : public vpForwardProjection { public: vpCircle(); explicit vpCircle(const vpColVector &oP); vpCircle(double oA, double oB, double oC, double oX, double oY, double oZ, double R); - virtual ~vpCircle(); + virtual ~vpCircle() override; - void changeFrame(const vpHomogeneousMatrix &noMo, vpColVector &noP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &noMo, vpColVector &noP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpCircle *duplicate() const; + vpCircle *duplicate() const override; double get_x() const { return p[0]; } double get_y() const { return p[1]; } @@ -121,10 +120,10 @@ class VISP_EXPORT vpCircle : public vpForwardProjection double getR() const { return cP[6]; } - void projection(); - void projection(const vpColVector &cP, vpColVector &p) const; + void projection() override; + void projection(const vpColVector &cP, vpColVector &p) const override; - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; void setWorldCoordinates(double oA, double oB, double oC, double oX, double oY, double oZ, double R); //################### @@ -134,7 +133,7 @@ class VISP_EXPORT vpCircle : public 
vpForwardProjection const double &theta, double &i, double &j); protected: - void init(); + void init() override; public: #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) diff --git a/modules/core/include/visp3/core/vpClient.h b/modules/core/include/visp3/core/vpClient.h index 10ae2923ad..6550491ad5 100644 --- a/modules/core/include/visp3/core/vpClient.h +++ b/modules/core/include/visp3/core/vpClient.h @@ -45,169 +45,161 @@ #ifdef VISP_HAVE_FUNC_INET_NTOP /*! - \class vpClient - - \ingroup group_core_com_ethernet - - \brief This class represents a Transmission Control Protocol (TCP) client. - - TCP provides reliable, ordered delivery of a stream of bytes from a program - on one computer to another program on another computer. - - Exemple of client's code, receiving and sending basic message - It corresponds to the client used in the first example of vpServer class' - documentation: - - \code -#include -#include - -int main() -{ - std::string servername = "localhost"; - unsigned int port = 35000; - - vpClient client; - client.connectToHostname(servername, port); - //client.connectToIP("127.0.0.1",port); - - int val = 0; - - while(1) - { - // Sending the new value to the first client - if(client.send(&val) != sizeof(int)) - std::cout << "Error while sending" << std::endl; - else - std::cout << "Sending : " << val << std::endl; - - // Receiving a value from the first client - if(client.receive(&val) != sizeof(int)) - std::cout << "Error while receiving" << std::endl; - else - std::cout << "Received : " << val << std::endl; - } - - return 0; -} - \endcode - - Exemple of client's code, sending a vpImage on request form. - It correspond to the server used in the second example of vpServer class' - documentation. 
- - \code -#include -#include -#include -#include -#include -#include - -#include "vpRequestImage.h" //See vpRequest class documentation - -int main(int argc, char **argv) -{ -#if defined(VISP_HAVE_V4L2) - std::string servername = "localhost"; - unsigned int port = 35000; - - vpImage I; // Create a gray level image container - - // Create a grabber based on v4l2 third party lib (for usb cameras under - // Linux) - vpV4l2Grabber g; - g.setScale(1); - g.setInput(0); - g.open(I); - - // Create an image viewer -#if defined(VISP_HAVE_X11) - vpDisplayX d(I, -1, -1, "Camera frame"); -#elif defined(VISP_HAVE_GDI) //Win32 - vpDisplayGDI d(I, -1, -1, "Camera frame"); -#endif - - vpClient client; - client.connectToHostname(servername, port); - //client.connectToIP("127.0.0.1",port); - - vpRequestImage reqImage(&I); - - while(1) - { - double t = vpTime::measureTimeMs(); - // Acquire a new image - g.acquire(I); - - vpDisplay::display(I); - vpDisplay::flush(I); - - client.sendAndEncodeRequest(reqImage); - - // A click in the viewer to exit - if ( vpDisplay::getClick(I, false) ) - break; - } - - return 0; -#endif -} - \endcode - - \sa vpClient - \sa vpRequest - \sa vpNetwork -*/ + * \class vpClient + * + * \ingroup group_core_com_ethernet + * + * \brief This class represents a Transmission Control Protocol (TCP) client. + * + * TCP provides reliable, ordered delivery of a stream of bytes from a program + * on one computer to another program on another computer. 
+ * + * Example of client's code, receiving and sending basic message + * It corresponds to the client used in the first example of vpServer class' + * documentation: + * + * \code + * #include + * #include + * + * int main() + * { + * std::string servername = "localhost"; + * unsigned int port = 35000; + * + * vpClient client; + * client.connectToHostname(servername, port); + * //client.connectToIP("127.0.0.1",port); + * + * int val = 0; + * + * while(1) + * { + * // Sending the new value to the first client + * if(client.send(&val) != sizeof(int)) + * std::cout << "Error while sending" << std::endl; + * else + * std::cout << "Sending : " << val << std::endl; + * + * // Receiving a value from the first client + * if(client.receive(&val) != sizeof(int)) + * std::cout << "Error while receiving" << std::endl; + * else + * std::cout << "Received : " << val << std::endl; + * } + * + * return 0; + * } + * \endcode + * + * Example of client's code, sending a vpImage on request form. + * It correspond to the server used in the second example of vpServer class' + * documentation. 
+ * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * #include "vpRequestImage.h" //See vpRequest class documentation + * + * int main(int argc, char **argv) + * { + * #if defined(VISP_HAVE_V4L2) + * std::string servername = "localhost"; + * unsigned int port = 35000; + * + * vpImage I; // Create a gray level image container + * + * // Create a grabber based on v4l2 third party lib (for usb cameras under + * // Linux) + * vpV4l2Grabber g; + * g.setScale(1); + * g.setInput(0); + * g.open(I); + * + * // Create an image viewer + * #if defined(VISP_HAVE_X11) + * vpDisplayX d(I, -1, -1, "Camera frame"); + * #elif defined(VISP_HAVE_GDI) //Win32 + * vpDisplayGDI d(I, -1, -1, "Camera frame"); + * #endif + * + * vpClient client; + * client.connectToHostname(servername, port); + * //client.connectToIP("127.0.0.1",port); + * + * vpRequestImage reqImage(&I); + * + * while(1) + * { + * double t = vpTime::measureTimeMs(); + * // Acquire a new image + * g.acquire(I); + * + * vpDisplay::display(I); + * vpDisplay::flush(I); + * + * client.sendAndEncodeRequest(reqImage); + * + * // A click in the viewer to exit + * if ( vpDisplay::getClick(I, false) ) + * break; + * } + * + * return 0; + * #endif + * } + * \endcode + * + * \sa vpClient + * \sa vpRequest + * \sa vpNetwork + */ class VISP_EXPORT vpClient : public vpNetwork { private: - //######## PARAMETERS ######## - //# # - //############################ - - unsigned int numberOfAttempts; - - //######## Private Functions ######## - //# # - //################################### + unsigned int m_numberOfAttempts; bool connectServer(vpNetwork::vpReceptor &serv); public: vpClient(); - virtual ~vpClient(); + virtual ~vpClient() override; bool connectToHostname(const std::string &hostname, const unsigned int &port_serv); bool connectToIP(const std::string &ip, const unsigned int &port_serv); void deconnect(const unsigned int &index = 0); /*! 
- Get the actual number of attempts to connect to the server. - - \sa vpClient::setNumberOfAttempts() - - \return Actual number of attempts. - */ - unsigned int getNumberOfAttempts() { return numberOfAttempts; } + * Get the actual number of attempts to connect to the server. + * + * \sa vpClient::setNumberOfAttempts() + * + * \return Actual number of attempts. + */ + unsigned int getNumberOfAttempts() { return m_numberOfAttempts; } /*! - Get the number of server that the client is connected on. - - \return Number of servers. - */ + * Get the number of server that the client is connected on. + * + * \return Number of servers. + */ unsigned int getNumberOfServers() { return (unsigned int)receptor_list.size(); } void print(); /*! - Set the number of attempts to connect to the server. - - \sa vpClient::getNumberOfAttempts() - - \param nb : Number of attempts. - */ - void setNumberOfAttempts(const unsigned int &nb) { numberOfAttempts = nb; } + * Set the number of attempts to connect to the server. + * + * \sa vpClient::getNumberOfAttempts() + * + * \param nb : Number of attempts. + */ + void setNumberOfAttempts(const unsigned int &nb) { m_numberOfAttempts = nb; } void stop(); }; diff --git a/modules/core/include/visp3/core/vpColVector.h b/modules/core/include/visp3/core/vpColVector.h index 0fb123573b..0236d46bc1 100644 --- a/modules/core/include/visp3/core/vpColVector.h +++ b/modules/core/include/visp3/core/vpColVector.h @@ -254,10 +254,6 @@ class VISP_EXPORT vpColVector : public vpArray2D std::copy(list.begin(), list.end(), data); } #endif - /*! - * Destructor. - */ - virtual ~vpColVector() { } /*! * Removes all elements from the vector (which are destroyed), diff --git a/modules/core/include/visp3/core/vpCylinder.h b/modules/core/include/visp3/core/vpCylinder.h index 5347c78658..6f402f7226 100644 --- a/modules/core/include/visp3/core/vpCylinder.h +++ b/modules/core/include/visp3/core/vpCylinder.h @@ -46,58 +46,60 @@ #include /*! 
- \class vpCylinder - \ingroup group_core_geometry - \brief Class that defines a 3D cylinder in the object frame and allows forward projection of a 3D cylinder in the - camera frame and in the 2D image plane by perspective projection. - All the parameters must be set in meter. - - A 3D cylinder of radius R is defined by the set of circles of radius R whose center belongs - to a straight line perpendicular to the plane of the circles. - - A 3D cylinder has the followings parameters: - - **in the object frame**: the cylinder is represented by the equation: - \f$ (X - oX)^2 + (Y - oY)^2 + (Z - oZ)^2 - (oA \; X + oB \; Y + oC \; Z)^2 - - R^2 = 0 \f$ with - \f[ - \left\{ \begin{array}{l} - oA^2 + oB^2 + oC^2 = 1 \\ - oA \; oX + oB \; oY + oC \; oZ = 0 - \end{array} \right. - \f] - where R is the radius of the cylinder, oA, oB, oC are the - coordinates of its direction vector and oX, oY, oZ are the - coordinates of the nearest point belonging to the cylinder axis from the - projection center. - The corresponding parameters are located in vpForwardProjection::oP 7-dim internal vector. They correspond - to oP = (oA, oB, oC, oX, oY, oZ, R). - Setting the cylinder parameters is achieved through the constructors with - parameters or setWorldCoordinates() methods. - To get theses parameters use get_oP(). - - - **in the camera frame**: parameters are saved in vpTracker::cP 7-dim internal vector - with cP =(cA, cB, cC, cX, cY, cZ, R). Considering the set of parameters oP expressed in the object - frame, cylinder coordinates expressed in the camera frame are obtained using - changeFrame(). To get these parameters use get_cP(). - - - **in the 2D image plane**: parameters are saved in vpTracker::p 4-dim vector. 
- They correspond to p = (\f$\rho_1\f$, \f$\theta_1\f$, \f$\rho_2\f$, \f$\theta_2\f$), noting - that for non-degenerated cases, the perspective projection of a cylinder on the image plane is a set of two - straight lines with equation: - \f[ - \left\{ \begin{array}{lll} - x \;\cos\theta_1 + x \;\sin\theta_1 - \rho_1 = 0 \\ - y \;\cos\theta_2 + y \;\sin\theta_2 - \rho_2 = 0 - \end{array} \right. - \f] - Perspective projection is achieved using projection() methods. The methods - get_p(), getRho1(), getTheta1() and getRho2(), getTheta2() allow to access to the - projected line parameters. -*/ + * \class vpCylinder + * \ingroup group_core_geometry + * \brief Class that defines a 3D cylinder in the object frame and allows forward projection of a 3D cylinder in the + * camera frame and in the 2D image plane by perspective projection. + * All the parameters must be set in meter. + * + * A 3D cylinder of radius R is defined by the set of circles of radius R whose center belongs + * to a straight line perpendicular to the plane of the circles. + * + * A 3D cylinder has the followings parameters: + * - **in the object frame**: the cylinder is represented by the equation: + * \f$ (X - oX)^2 + (Y - oY)^2 + (Z - oZ)^2 - (oA \; X + oB \; Y + oC \; Z)^2 - + * R^2 = 0 \f$ with + * \f[ + * \left\{ \begin{array}{l} + * oA^2 + oB^2 + oC^2 = 1 \\ + * oA \; oX + oB \; oY + oC \; oZ = 0 + * \end{array} \right. + * \f] + * where R is the radius of the cylinder, oA, oB, oC are the + * coordinates of its direction vector and oX, oY, oZ are the + * coordinates of the nearest point belonging to the cylinder axis from the + * projection center. + * The corresponding parameters are located in vpForwardProjection::oP 7-dim internal vector. They correspond + * to oP = (oA, oB, oC, oX, oY, oZ, R). + * Setting the cylinder parameters is achieved through the constructors with + * parameters or setWorldCoordinates() methods. + * To get theses parameters use get_oP(). 
+ * + * - **in the camera frame**: parameters are saved in vpTracker::cP 7-dim internal vector + * with cP =(cA, cB, cC, cX, cY, cZ, R). Considering the set of parameters oP expressed in the object + * frame, cylinder coordinates expressed in the camera frame are obtained using + * changeFrame(). To get these parameters use get_cP(). + * + * - **in the 2D image plane**: parameters are saved in vpTracker::p 4-dim vector. + * They correspond to p = (\f$\rho_1\f$, \f$\theta_1\f$, \f$\rho_2\f$, \f$\theta_2\f$), noting + * that for non-degenerated cases, the perspective projection of a cylinder on the image plane is a set of two + * straight lines with equation: + * \f[ + * \left\{ \begin{array}{lll} + * x \;\cos\theta_1 + x \;\sin\theta_1 - \rho_1 = 0 \\ + * y \;\cos\theta_2 + y \;\sin\theta_2 - \rho_2 = 0 + * \end{array} \right. + * \f] + * + * Perspective projection is achieved using projection() methods. The methods + * get_p(), getRho1(), getTheta1() and getRho2(), getTheta2() allow to access to the + * projected line parameters. + */ class VISP_EXPORT vpCylinder : public vpForwardProjection { public: - typedef enum { + typedef enum + { line1, /*!< First limb of the cylinder. */ line2 /*!< Second limb of the cylinder. 
*/ } vpLineCylinderType; @@ -105,85 +107,90 @@ class VISP_EXPORT vpCylinder : public vpForwardProjection vpCylinder(); explicit vpCylinder(const vpColVector &oP); vpCylinder(double oA, double oB, double oC, double oX, double oY, double oZ, double R); - virtual ~vpCylinder(); - void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; double computeZ(double x, double y) const; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpCylinder *duplicate() const; + vpCylinder *duplicate() const override; /*! - Return the \f$\rho_1\f$ parameter of the line corresponding to the - projection of the cylinder in the image plane. - \sa getTheta1() - */ + * Return the \f$\rho_1\f$ parameter of the line corresponding to the + * projection of the cylinder in the image plane. + * \sa getTheta1() + */ double getRho1() const { return p[0]; } /*! - Return the \f$\theta_1\f$ parameter of the line corresponding to the - projection of the cylinder in the image plane. - \sa getRho1() - */ + * Return the \f$\theta_1\f$ parameter of the line corresponding to the + * projection of the cylinder in the image plane. 
+ * \sa getRho1() + */ double getTheta1() const { return p[1]; } /*! - Return the \f$\rho_2\f$ parameter of the line corresponding to the - projection of the cylinder in the image plane. - \sa getTheta2() - */ + * Return the \f$\rho_2\f$ parameter of the line corresponding to the + * projection of the cylinder in the image plane. + * \sa getTheta2() + */ double getRho2() const { return p[2]; } /*! - Return the \f$\theta_2\f$ parameter of the line corresponding to the - projection of the cylinder in the image plane. - \sa getRho2() - */ + * Return the \f$\theta_2\f$ parameter of the line corresponding to the + * projection of the cylinder in the image plane. + * \sa getRho2() + */ double getTheta2() const { return p[3]; } /*! - Return cylinder cA parameter expressed in the camera frame. - */ + * Return cylinder cA parameter expressed in the camera frame. + */ double getA() const { return cP[0]; } + /*! - Return cylinder cB parameter expressed in the camera frame. - */ + * Return cylinder cB parameter expressed in the camera frame. + */ double getB() const { return cP[1]; } + /*! - Return cylinder cC parameter expressed in the camera frame. - */ + * Return cylinder cC parameter expressed in the camera frame. + */ double getC() const { return cP[2]; } + /*! - Return cylinder cX parameter expressed in the camera frame. - */ + * Return cylinder cX parameter expressed in the camera frame. + */ double getX() const { return cP[3]; } + /*! - Return cylinder cY parameter expressed in the camera frame. - */ + * Return cylinder cY parameter expressed in the camera frame. + */ double getY() const { return cP[4]; } + /*! - Return cylinder cZ parameter expressed in the camera frame. - */ + * Return cylinder cZ parameter expressed in the camera frame. + */ double getZ() const { return cP[5]; } + /*! - Return cylinder R parameter corresponding to the cylinder radius. - */ + * Return cylinder R parameter corresponding to the cylinder radius. 
+ */ double getR() const { return cP[6]; } - void init(); + void init() override; - void projection(); - void projection(const vpColVector &cP, vpColVector &p) const; + void projection() override; + void projection(const vpColVector &cP, vpColVector &p) const override; - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; void setWorldCoordinates(double oA, double oB, double oC, double oX, double oY, double oZ, double R); }; diff --git a/modules/core/include/visp3/core/vpDisplay.h b/modules/core/include/visp3/core/vpDisplay.h index 326a7e8ea4..2e1327b895 100644 --- a/modules/core/include/visp3/core/vpDisplay.h +++ b/modules/core/include/visp3/core/vpDisplay.h @@ -48,127 +48,126 @@ #include /*! - \file vpDisplay.h - \brief Generic class for image display, also provide the interface - with the image. -*/ + * \file vpDisplay.h + * \brief Generic class for image display, also provide the interface + * with the image. + */ /*! - - \class vpDisplay - - \ingroup group_core_gui - - \brief Class that defines generic functionalities for display. - - The \ref tutorial-getting-started is a good starting point to know - how to use this class to display an image in a window. - - \warning Since ViSP 3.3.1 or higher we introduce the alpha channel support for color - transparency. This new feature is only supported yet using vpDisplayOpenCV. See vpColor - header documentation and displayOpenCV.cpp example for usage when displaying filled - transparent circles and rectangles. - - The example below shows how to use this class. 
- - \code -#include -#include -#include -#include -#include -#include -#include - -int main() -{ - vpImage I; // Grey level image - - // Read an image in PGM P5 format -#ifdef _WIN32 - vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); -#else - vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); -#endif - - vpDisplay *d; - - // Depending on the detected third party libraries, we instantiate here the - // first video device which is available -#if defined(VISP_HAVE_X11) - d = new vpDisplayX; -#elif defined(VISP_HAVE_GTK) - d = new vpDisplayGTK; -#elif defined(VISP_HAVE_GDI) - d = new vpDisplayGDI; -#elif defined(VISP_HAVE_D3D9) - d = new vpDisplayD3D; -#elif defined(HAVE_OPENCV_HIGHGUI) - d = new vpDisplayOpenCV; -#endif - - // Initialize the display with the image I. Display and image are - // now link together. -#ifdef VISP_HAVE_DISPLAY - d->init(I); -#endif - - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My image"); - - // To initialize the video device, it is also possible to replace - // the 3 previous lines by: - // d->init(I, 400, 100, "My image"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(50); - topLeftCorner.set_j(10); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, - true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Get non blocking keyboard events - std::cout << "Check keyboard events..." 
<< std::endl; - char key[10]; sprintf(key, "\0"); - bool ret; - for (int i=0; i< 200; i++) { - bool ret = vpDisplay::getKeyboardEvent(I, key, false); - if (ret) - std::cout << "keyboard event: key: " << "\"" << key - << "\"" << std::endl; - vpTime::wait(40); - } - - // Get a blocking keyboard event - std::cout << "Wait for a keyboard event..." << std::endl; - ret = vpDisplay::getKeyboardEvent(I, key, true); - std::cout << "keyboard event: " << ret << std::endl; - if (ret) - std::cout << "key: " << "\"" << key << "\"" << std::endl; - - // Wait for a click in the display window - std::cout << "Wait for a button click..." << std::endl; - vpDisplay::getClick(I); - - delete d; -} - \endcode - - Other examples are available in tutorial-image-viewer.cpp and - tutorial-viewer.cpp. -*/ + * \class vpDisplay + * + * \ingroup group_core_gui + * + * \brief Class that defines generic functionalities for display. + * + * The \ref tutorial-getting-started is a good starting point to know + * how to use this class to display an image in a window. + * + * \warning Since ViSP 3.3.1 or higher we introduce the alpha channel support for color + * transparency. This new feature is only supported yet using vpDisplayOpenCV. See vpColor + * header documentation and displayOpenCV.cpp example for usage when displaying filled + * transparent circles and rectangles. + * + * The example below shows how to use this class. 
+ * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpImage I; // Grey level image + * + * // Read an image in PGM P5 format + * #ifdef _WIN32 + * vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); + * #else + * vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); + * #endif + * + * vpDisplay *d; + * + * // Depending on the detected third party libraries, we instantiate here the + * // first video device which is available + * #if defined(VISP_HAVE_X11) + * d = new vpDisplayX; + * #elif defined(VISP_HAVE_GTK) + * d = new vpDisplayGTK; + * #elif defined(VISP_HAVE_GDI) + * d = new vpDisplayGDI; + * #elif defined(VISP_HAVE_D3D9) + * d = new vpDisplayD3D; + * #elif defined(HAVE_OPENCV_HIGHGUI) + * d = new vpDisplayOpenCV; + * #endif + * + * // Initialize the display with the image I. Display and image are + * // now link together. + * #ifdef VISP_HAVE_DISPLAY + * d->init(I); + * #endif + * + * // Specify the window location + * vpDisplay::setWindowPosition(I, 400, 100); + * + * // Set the display window title + * vpDisplay::setTitle(I, "My image"); + * + * // To initialize the video device, it is also possible to replace + * // the 3 previous lines by: + * // d->init(I, 400, 100, "My image"); + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpImagePoint topLeftCorner; + * topLeftCorner.set_i(50); + * topLeftCorner.set_j(10); + * vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, + * true); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // Get non blocking keyboard events + * std::cout << "Check keyboard events..." 
<< std::endl; + * char key[10]; sprintf(key, "\0"); + * bool ret; + * for (int i=0; i< 200; i++) { + * bool ret = vpDisplay::getKeyboardEvent(I, key, false); + * if (ret) + * std::cout << "keyboard event: key: " << "\"" << key + * << "\"" << std::endl; + * vpTime::wait(40); + * } + * + * // Get a blocking keyboard event + * std::cout << "Wait for a keyboard event..." << std::endl; + * ret = vpDisplay::getKeyboardEvent(I, key, true); + * std::cout << "keyboard event: " << ret << std::endl; + * if (ret) + * std::cout << "key: " << "\"" << key << "\"" << std::endl; + * + * // Wait for a click in the display window + * std::cout << "Wait for a button click..." << std::endl; + * vpDisplay::getClick(I); + * + * delete d; + * } + * \endcode + * + * Other examples are available in tutorial-image-viewer.cpp and + * tutorial-viewer.cpp. + */ class VISP_EXPORT vpDisplay { public: @@ -225,37 +224,37 @@ class VISP_EXPORT vpDisplay //@{ unsigned int computeAutoScale(unsigned int width, unsigned int height); /*! - Return the value of the down scale factor applied to the image in order to - reduce the size of the window used to display the image. + * Return the value of the down scale factor applied to the image in order to + * reduce the size of the window used to display the image. */ unsigned int getDownScalingFactor() { return m_scale; } /*! - Return the display height. - \sa getWidth() - */ + * Return the display height. + * \sa getWidth() + */ inline unsigned int getHeight() const { return m_height; } /*! - Return the display width. - \sa getHeight() - */ + * Return the display width. + * \sa getHeight() + */ inline unsigned int getWidth() const { return m_width; } /*! - Return the position (along the horizontal axis) on the screen of the - display window. \sa getWindowYPosition() + * Return the position (along the horizontal axis) on the screen of the + * display window. \sa getWindowYPosition() */ int getWindowXPosition() const { return m_windowXPosition; } /*! 
- Return the position (along the vertical axis) on the screen of the display - window. \sa getWindowXPosition() + * Return the position (along the vertical axis) on the screen of the display + * window. \sa getWindowXPosition() */ int getWindowYPosition() const { return m_windowYPosition; } /*! - Check if the display has been initialised - - \return True if the display has been initialised, otherwise False - */ + * Check if the display has been initialised. + * + * \return True if the display has been initialised, otherwise False + */ inline bool isInitialised() { return m_displayHasBeenInitialized; } virtual void setDownScalingFactor(unsigned int scale); virtual void setDownScalingFactor(vpScaleType scaleType); @@ -265,47 +264,47 @@ class VISP_EXPORT vpDisplay /** @name vpDisplay pure virtual functions */ //@{ /*! - Set the window backgroud to \e color. - \param color : Background color. - */ + * Set the window background to \e color. + * \param color : Background color. + */ virtual void clearDisplay(const vpColor &color = vpColor::white) = 0; /*! - Close the window. - */ + * Close the window. + */ virtual void closeDisplay() = 0; /*! - Display an arrow from image point \e ip1 to image point \e ip2. - \param ip1 : Initial image point. - \param ip2 : Final image point. - \param color : Arrow color. - \param w : Arrow width. - \param h : Arrow height. - \param thickness : Thickness of the lines used to display the arrow. - */ + * Display an arrow from image point \e ip1 to image point \e ip2. + * \param ip1 : Initial image point. + * \param ip2 : Final image point. + * \param color : Arrow color. + * \param w : Arrow width. + * \param h : Arrow height. + * \param thickness : Thickness of the lines used to display the arrow. + */ virtual void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) = 0; /*! 
- Display a string at the image point \e ip location. - - To select the font used to display the string, use setFont(). - - \param ip : Upper left image point location of the string in the display. - \param text : String to display in overlay. - \param color : String color. - - \sa setFont() - */ + * Display a string at the image point \e ip location. + * + * To select the font used to display the string, use setFont(). + * + * \param ip : Upper left image point location of the string in the display. + * \param text : String to display in overlay. + * \param color : String color. + * + * \sa setFont() + */ virtual void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) = 0; /*! - Display a circle. - \param circle : Circle to display. - \param color : Circle color. - \param fill : When set to true fill the circle. - \param thickness : Thickness of the circle. This parameter is only useful - when \e fill is set to false. - */ + * Display a circle. + * \param circle : Circle to display. + * \param color : Circle color. + * \param fill : When set to true fill the circle. + * \param thickness : Thickness of the circle. This parameter is only useful + * when \e fill is set to false. + */ inline virtual void displayCircle(const vpImageCircle &circle, const vpColor &color, bool fill = false, unsigned int thickness = 1) { @@ -313,68 +312,60 @@ class VISP_EXPORT vpDisplay } /*! - Display a circle. - \param center : Circle center position. - \param radius : Circle radius. - \param color : Circle color. - \param fill : When set to true fill the circle. - \param thickness : Thickness of the circle. This parameter is only useful - when \e fill is set to false. - */ + * Display a circle. + * \param center : Circle center position. + * \param radius : Circle radius. + * \param color : Circle color. + * \param fill : When set to true fill the circle. + * \param thickness : Thickness of the circle. 
This parameter is only useful + * when \e fill is set to false. + */ virtual void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, unsigned int thickness = 1) = 0; /*! - Display a cross at the image point \e ip location. - \param ip : Cross location. - \param size : Size (width and height) of the cross. - \param color : Cross color. - \param thickness : Thickness of the lines used to display the cross. - */ + * Display a cross at the image point \e ip location. + * \param ip : Cross location. + * \param size : Size (width and height) of the cross. + * \param color : Cross color. + * \param thickness : Thickness of the lines used to display the cross. + */ virtual void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) = 0; /*! - Display a dashed line from image point \e ip1 to image point \e ip2. - \param ip1 : Initial image point. - \param ip2 : Final image point. - \param color : Line color. - \param thickness : Dashed line thickness. - */ + * Display a dashed line from image point \e ip1 to image point \e ip2. + * \param ip1 : Initial image point. + * \param ip2 : Final image point. + * \param color : Line color. + * \param thickness : Dashed line thickness. + */ virtual void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) = 0; /*! - Display a line from image point \e ip1 to image point \e ip2. - \param ip1 : Initial image point. - \param ip2 : Final image point. - \param color : Line color. - \param thickness : Line thickness. - */ + * Display a line from image point \e ip1 to image point \e ip2. + * \param ip1 : Initial image point. + * \param ip2 : Final image point. + * \param color : Line color. + * \param thickness : Line thickness. + */ virtual void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) = 0; /*! 
- Display the gray level image \e I (8bits). - - \warning Display has to be initialized. - - \warning Suppress the overlay drawing. - - \param I : Image to display. - - \sa init(), closeDisplay() - */ + * Display the gray level image \e I (8bits). + * \warning Display has to be initialized. + * \warning Suppress the overlay drawing. + * \param I : Image to display. + * \sa init(), closeDisplay() + */ virtual void displayImage(const vpImage &I) = 0; /*! - Display the color image \e I in RGBa format (32bits). - - \warning Display has to be initialized. - - \warning Suppress the overlay drawing. - - \param I : Image to display. - - \sa init(), closeDisplay() - */ + * Display the color image \e I in RGBa format (32bits). + * \warning Display has to be initialized. + * \warning Suppress the overlay drawing. + * \param I : Image to display. + * \sa init(), closeDisplay() + */ virtual void displayImage(const vpImage &I) = 0; virtual void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, @@ -383,339 +374,334 @@ class VISP_EXPORT vpDisplay unsigned int height) = 0; /*! - Display a point at the image point \e ip location. - \param ip : Point location. - \param color : Point color. - \param thickness : Point thickness. - */ + * Display a point at the image point \e ip location. + * \param ip : Point location. + * \param color : Point color. + * \param thickness : Point thickness. + */ virtual void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) = 0; /*! - Display a rectangle with \e topLeft as the top-left corner and \e - width and \e height the rectangle size. - - \param topLeft : Top-left corner of the rectangle. - \param width : Rectangle width. - \param height : Rectangle height. - \param color : Rectangle color. - \param fill : When set to true fill the rectangle. - - \param thickness : Thickness of the four lines used to display the - rectangle. 
This parameter is only useful when \e fill is set to - false. - */ + * Display a rectangle with \e topLeft as the top-left corner and \e + * width and \e height the rectangle size. + * + * \param topLeft : Top-left corner of the rectangle. + * \param width : Rectangle width. + * \param height : Rectangle height. + * \param color : Rectangle color. + * \param fill : When set to true fill the rectangle. + * \param thickness : Thickness of the four lines used to display the + * rectangle. This parameter is only useful when \e fill is set to + * false. + */ virtual void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill = false, unsigned int thickness = 1) = 0; /*! - Display a rectangle with \e topLeft as the top-left corner and \e - width and \e height the rectangle size. - - \param topLeft : Top-left corner of the rectangle. - \param bottomRight : Bottom-right corner of the rectangle. - \param color : Rectangle color. - \param fill : When set to true fill the rectangle. - - \param thickness : Thickness of the four lines used to display the - rectangle. This parameter is only useful when \e fill is set to - false. - */ + * Display a rectangle with \e topLeft as the top-left corner and \e + * width and \e height the rectangle size. + * + * \param topLeft : Top-left corner of the rectangle. + * \param bottomRight : Bottom-right corner of the rectangle. + * \param color : Rectangle color. + * \param fill : When set to true fill the rectangle. + * \param thickness : Thickness of the four lines used to display the + * rectangle. This parameter is only useful when \e fill is set to + * false. + */ virtual void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, bool fill = false, unsigned int thickness = 1) = 0; /*! - Display a rectangle with \e topLeft as the top-left corner and \e - width and \e height the rectangle size. 
- - \param rectangle : Rectangle characteristics. - \param color : Rectangle color. - \param fill : When set to true fill the rectangle. - - \param thickness : Thickness of the four lines used to display the - rectangle. This parameter is only useful when \e fill is set to - false. - - */ + * Display a rectangle with \e topLeft as the top-left corner and \e + * width and \e height the rectangle size. + * + * \param rectangle : Rectangle characteristics. + * \param color : Rectangle color. + * \param fill : When set to true fill the rectangle. + * \param thickness : Thickness of the four lines used to display the + * rectangle. This parameter is only useful when \e fill is set to + * false. + */ virtual void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) = 0; /*! - Flushes the display. - It's necessary to use this function to see the results of any drawing. - */ + * Flushes the display. + * It's necessary to use this function to see the results of any drawing. + */ virtual void flushDisplay() = 0; /*! - Flushes the display. - It's necessary to use this function to see the results of any drawing. - */ + * Flushes the display. + * It's necessary to use this function to see the results of any drawing. + */ virtual void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) = 0; /* Simple interface with the mouse event */ /*! - Wait for a click from one of the mouse button. - - \param blocking [in] : Blocking behavior. - - When set to true, this method waits until a mouse button is - pressed and then returns always true. - - When set to false, returns true only if a mouse button is - pressed, otherwise returns false. - - \return - - true if a button was clicked. This is always the case if blocking is set - to \e true. - - false if no button was clicked. This can occur if blocking is set - to \e false. - */ + * Wait for a click from one of the mouse button. 
+ * + * \param blocking [in] : Blocking behavior. + * - When set to true, this method waits until a mouse button is + * pressed and then returns always true. + * - When set to false, returns true only if a mouse button is + * pressed, otherwise returns false. + * + * \return + * - true if a button was clicked. This is always the case if blocking is set + * to \e true. + * - false if no button was clicked. This can occur if blocking is set + * to \e false. + */ virtual bool getClick(bool blocking = true) = 0; /*! - Wait for a click from one of the mouse button and get the position - of the clicked image point. - - \param ip [out] : The coordinates of the clicked image point. - - \param blocking [in] : true for a blocking behaviour waiting a mouse - button click, false for a non blocking behaviour. - - \return - - true if a button was clicked. This is always the case if blocking is set - to \e true. - - false if no button was clicked. This can occur if blocking is set - to \e false. - */ + * Wait for a click from one of the mouse button and get the position + * of the clicked image point. + * + * \param ip [out] : The coordinates of the clicked image point. + * + * \param blocking [in] : true for a blocking behaviour waiting a mouse + * button click, false for a non blocking behaviour. + * + * \return + * - true if a button was clicked. This is always the case if blocking is set + * to \e true. + * - false if no button was clicked. This can occur if blocking is set + * to \e false. + */ virtual bool getClick(vpImagePoint &ip, bool blocking = true) = 0; /*! - Wait for a mouse button click and get the position of the clicked - pixel. The button used to click is also set. - - \param ip [out] : The coordinates of the clicked image point. - - \param button [out] : The button used to click. - - \param blocking [in] : - - When set to true, this method waits until a mouse button is - pressed and then returns always true. 
- - When set to false, returns true only if a mouse button is - pressed, otherwise returns false. - - \return true if a mouse button is pressed, false otherwise. If a - button is pressed, the location of the mouse pointer is updated in - \e ip. - */ + * Wait for a mouse button click and get the position of the clicked + * pixel. The button used to click is also set. + * + * \param ip [out] : The coordinates of the clicked image point. + * + * \param button [out] : The button used to click. + * + * \param blocking [in] : + * - When set to true, this method waits until a mouse button is + * pressed and then returns always true. + * - When set to false, returns true only if a mouse button is + * pressed, otherwise returns false. + * + * \return true if a mouse button is pressed, false otherwise. If a + * button is pressed, the location of the mouse pointer is updated in + * \e ip. + */ virtual bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) = 0; /*! - Wait for a mouse button click release and get the position of the - image point were the click release occurs. The button used to click is - also set. Same method as getClick(unsigned int&, unsigned int&, - vpMouseButton::vpMouseButtonType &, bool). - - \param ip [out] : Position of the clicked image point. - - \param button [in] : Button used to click. - - \param blocking [in] : true for a blocking behaviour waiting a mouse - button click, false for a non blocking behaviour. - - \return - - true if a button was clicked. This is always the case if blocking is set - to \e true. - - false if no button was clicked. This can occur if blocking is set - to \e false. - - \sa getClick(vpImagePoint &, vpMouseButton::vpMouseButtonType &, bool) - - */ + * Wait for a mouse button click release and get the position of the + * image point were the click release occurs. The button used to click is + * also set. 
Same method as getClick(unsigned int&, unsigned int&, + * vpMouseButton::vpMouseButtonType &, bool). + * + * \param ip [out] : Position of the clicked image point. + * + * \param button [in] : Button used to click. + * + * \param blocking [in] : true for a blocking behaviour waiting a mouse + * button click, false for a non blocking behaviour. + * + * \return + * - true if a button was clicked. This is always the case if blocking is set + * to \e true. + * - false if no button was clicked. This can occur if blocking is set + * to \e false. + * + * \sa getClick(vpImagePoint &, vpMouseButton::vpMouseButtonType &, bool) + * + */ virtual bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) = 0; /*! - Get a keyboard event. - - \param blocking [in] : Blocking behavior. - - When set to true, this method waits until a key is - pressed and then returns always true. - - When set to false, returns true only if a key is - pressed, otherwise returns false. - - \return - - true if a key was pressed. This is always the case if blocking is set - to \e true. - - false if no key was pressed. This can occur if blocking is set - to \e false. - */ + * Get a keyboard event. + * + * \param blocking [in] : Blocking behavior. + * - When set to true, this method waits until a key is + * pressed and then returns always true. + * - When set to false, returns true only if a key is + * pressed, otherwise returns false. + * + * \return + * - true if a key was pressed. This is always the case if blocking is set + * to \e true. + * - false if no key was pressed. This can occur if blocking is set + * to \e false. + */ virtual bool getKeyboardEvent(bool blocking = true) = 0; /*! - - Get a keyboard event. - - \param blocking [in] : Blocking behavior. - - When set to true, this method waits until a key is - pressed and then returns always true. - - When set to false, returns true only if a key is - pressed, otherwise returns false. 
- - \param key [out]: If possible, an ISO Latin-1 character - corresponding to the keyboard key. - - \return - - true if a key was pressed. This is always the case if blocking is set - to \e true. - - false if no key was pressed. This can occur if blocking is set - to \e false. - */ + * Get a keyboard event. + * + * \param blocking [in] : Blocking behavior. + * - When set to true, this method waits until a key is + * pressed and then returns always true. + * - When set to false, returns true only if a key is + * pressed, otherwise returns false. + * + * \param key [out]: If possible, an ISO Latin-1 character + * corresponding to the keyboard key. + * + * \return + * - true if a key was pressed. This is always the case if blocking is set + * to \e true. + * - false if no key was pressed. This can occur if blocking is set + * to \e false. + */ virtual bool getKeyboardEvent(std::string &key, bool blocking = true) = 0; /*! - Get the coordinates of the mouse pointer. - - \param ip [out] : The coordinates of the mouse pointer. - - \return true if a pointer motion event was received, false otherwise. - - \exception vpDisplayException::notInitializedError : If the display - was not initialized. - */ + * Get the coordinates of the mouse pointer. + * + * \param ip [out] : The coordinates of the mouse pointer. + * + * \return true if a pointer motion event was received, false otherwise. + * + * \exception vpDisplayException::notInitializedError : If the display + * was not initialized. + */ virtual bool getPointerMotionEvent(vpImagePoint &ip) = 0; /*! - Get the coordinates of the mouse pointer. - - \param ip [out] : The coordinates of the mouse pointer. - - \return true. - - \exception vpDisplayException::notInitializedError : If the display - was not initialized. - */ + * Get the coordinates of the mouse pointer. + * + * \param ip [out] : The coordinates of the mouse pointer. + * + * \return true. 
+ * + * \exception vpDisplayException::notInitializedError : If the display + * was not initialized. + */ virtual bool getPointerPosition(vpImagePoint &ip) = 0; /*! - Gets the screen vertical resolution in pixel. + * Gets the screen vertical resolution in pixel. */ virtual unsigned int getScreenHeight() = 0; /*! - Gets the screen resolution in pixel. - \param width, height : Screen resolution in pixels. + * Gets the screen resolution in pixel. + * \param width, height : Screen resolution in pixels. */ virtual void getScreenSize(unsigned int &width, unsigned int &height) = 0; /*! - Gets the screen horizontal resolution in pixel. + * Gets the screen horizontal resolution in pixel. */ virtual unsigned int getScreenWidth() = 0; /*! - Initialize the display (size, position and title) of a gray level image. + * Initialize the display (size, position and title) of a gray level image. - \param I : Image to be displayed (not that image has to be initialized). - \param x : Horizontal position of the upper/left window corner. - \param y : Vertical position of the upper/left window corner. - \param title : Window title. - */ + * \param I : Image to be displayed (not that image has to be initialized). + * \param x : Horizontal position of the upper/left window corner. + * \param y : Vertical position of the upper/left window corner. + * \param title : Window title. + */ virtual void init(vpImage &I, int x = -1, int y = -1, const std::string &title = "") = 0; /*! - Initialize the display (size, position and title) of a color - image in RGBa format. - - \param I : Image to be displayed (not that image has to be initialized). - \param x : Horizontal position of the upper/left window corner. - \param y : Vertical position of the upper/left window corner. - \param title : Window title. - */ + * Initialize the display (size, position and title) of a color + * image in RGBa format. + * + * \param I : Image to be displayed (not that image has to be initialized). 
+ * \param x : Horizontal position of the upper/left window corner. + * \param y : Vertical position of the upper/left window corner. + * \param title : Window title. + */ virtual void init(vpImage &I, int x = -1, int y = -1, const std::string &title = "") = 0; /*! - Initialize the display size, position and title. - - \param width : Window width. - \param height : Window height. - \param x : Horizontal position of the upper/left window corner. - \param y : Vertical position of the upper/left window corner. - \param title : Window title. - - The following example shows how to use this function - \code - #include - #include - #include - #include - #include - #include - - int main() - { - #ifdef VISP_HAVE_DISPLAY - vpImage I; - vpImageIo::read(I, "lena.pgm"); - - vpDisplay *d; - - #if defined(VISP_HAVE_X11) - d = new vpDisplayX; - #elif defined(VISP_HAVE_GTK) - d = new vpDisplayGTK; - #elif defined(VISP_HAVE_GDI) - d = new vpDisplayGDI; - #elif defined(VISP_HAVE_D3D9) - d = new vpDisplayD3D; - #elif defined(HAVE_OPENCV_HIGHGUI) - d = new vpDisplayOpenCV; - #else - std::cout << "Sorry, no video device is available" << std::endl; - return -1; - #endif - - d->init(I.getWidth(), I.getHeight(), 10, 20, "viewer"); - - // Now associate the display to the image - I.display = d; - - // Set the display background with image I content - vpDisplay::display(I); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // wait for a mouse clink in the display to exit - vpDisplay::getClick(I); - - delete d; - #endif - } - \endcode - */ + * Initialize the display size, position and title. + * + * \param width : Window width. + * \param height : Window height. + * \param x : Horizontal position of the upper/left window corner. + * \param y : Vertical position of the upper/left window corner. + * \param title : Window title. 
+ * + * The following example shows how to use this function + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_DISPLAY + * vpImage I; + * vpImageIo::read(I, "lena.pgm"); + * + * vpDisplay *d; + * + * #if defined(VISP_HAVE_X11) + * d = new vpDisplayX; + * #elif defined(VISP_HAVE_GTK) + * d = new vpDisplayGTK; + * #elif defined(VISP_HAVE_GDI) + * d = new vpDisplayGDI; + * #elif defined(VISP_HAVE_D3D9) + * d = new vpDisplayD3D; + * #elif defined(HAVE_OPENCV_HIGHGUI) + * d = new vpDisplayOpenCV; + * #else + * std::cout << "Sorry, no video device is available" << std::endl; + * return -1; + * #endif + * + * d->init(I.getWidth(), I.getHeight(), 10, 20, "viewer"); + * + * // Now associate the display to the image + * I.display = d; + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // wait for a mouse click in the display to exit + * vpDisplay::getClick(I); + * + * delete d; + * #endif + * } + * \endcode + */ virtual void init(unsigned int width, unsigned int height, int x = -1, int y = -1, const std::string &title = "") = 0; /*! - Set the font used to display a text in overlay. The display is - performed using displayCharString(). - - \param font : The expected font name. The available fonts are given by - the "xlsfonts" binary. To choose a font you can also use the - "xfontsel" binary. - - \note Under UNIX, to know all the available fonts, use the - "xlsfonts" binary in a terminal. You can also use the "xfontsel" binary. - - \sa displayCharString() - */ + * Set the font used to display a text in overlay. The display is + * performed using displayCharString(). + * + * \param font : The expected font name. The available fonts are given by + * the "xlsfonts" binary. To choose a font you can also use the + * "xfontsel" binary. 
+ * + * \note Under UNIX, to know all the available fonts, use the + * "xlsfonts" binary in a terminal. You can also use the "xfontsel" binary. + * + * \sa displayCharString() + */ virtual void setFont(const std::string &font) = 0; /*! - Set the window title. - \param title : Window title. - */ + * Set the window title. + * \param title : Window title. + */ virtual void setTitle(const std::string &title) = 0; /*! - Set the window position in the screen. + * Set the window position in the screen. + * + * \param x : Horizontal position of the upper/left window corner. + * \param y : Vertical position of the upper/left window corner. - \param x : Horizontal position of the upper/left window corner. - \param y : Vertical position of the upper/left window corner. - - */ + */ virtual void setWindowPosition(int x, int y) = 0; //@} #endif // ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - @name Static public vpDisplay functionalities on gray level images. - */ + * @name Static public vpDisplay functionalities on gray level images. + */ //@{ static void close(vpImage &I); static void display(const vpImage &I); @@ -823,8 +809,8 @@ class VISP_EXPORT vpDisplay //@} /*! - @name Static public vpDisplay functionalities on 32 bits color images. - */ + * @name Static public vpDisplay functionalities on 32 bits color images. + */ //@{ static void close(vpImage &I); static void display(const vpImage &I); @@ -925,7 +911,7 @@ class VISP_EXPORT vpDisplay //@} private: - //! get the window pixmap and put it in vpRGBa image + //! Get the window pixmap and put it in vpRGBa image. virtual void getImage(vpImage &I) = 0; }; diff --git a/modules/core/include/visp3/core/vpException.h b/modules/core/include/visp3/core/vpException.h index 6d96c0fb62..6dffe6105e 100644 --- a/modules/core/include/visp3/core/vpException.h +++ b/modules/core/include/visp3/core/vpException.h @@ -68,7 +68,7 @@ class VISP_EXPORT vpException : public std::exception void setMessage(const char *format, va_list args); //! 
forbid the empty constructor (protected) - vpException() : code(notInitialized), message("") { }; + vpException() : code(notInitialized), message("") { } public: enum generalExceptionEnum @@ -105,16 +105,6 @@ class VISP_EXPORT vpException : public std::exception */ explicit vpException(int code); - /*! - * Destructor. Do nothing but implemented to fit the inheritance from - * std::exception - */ -#if VISP_CXX_STANDARD > VISP_CXX_STANDARD_98 - virtual ~vpException() { } -#else - virtual ~vpException() throw() { } -#endif - /** @name Inherited functionalities from vpException */ //@{ /*! diff --git a/modules/core/include/visp3/core/vpForceTwistMatrix.h b/modules/core/include/visp3/core/vpForceTwistMatrix.h index 993e66ef33..8f01f4ce91 100644 --- a/modules/core/include/visp3/core/vpForceTwistMatrix.h +++ b/modules/core/include/visp3/core/vpForceTwistMatrix.h @@ -177,11 +177,6 @@ class VISP_EXPORT vpForceTwistMatrix : public vpArray2D vpForceTwistMatrix(const vpRotationMatrix &R); vpForceTwistMatrix(const vpThetaUVector &thetau); - /*! - Destructor. - */ - virtual ~vpForceTwistMatrix(){} - vpForceTwistMatrix buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R); vpForceTwistMatrix buildFrom(const vpTranslationVector &t, const vpThetaUVector &thetau); vpForceTwistMatrix buildFrom(const vpHomogeneousMatrix &M, bool full = true); @@ -224,7 +219,7 @@ class VISP_EXPORT vpForceTwistMatrix : public vpArray2D \deprecated Provided only for compat with previous releases. This function does nothing. */ - vp_deprecated void init(){} + vp_deprecated void init() { } /*! \deprecated You should rather use eye(). */ diff --git a/modules/core/include/visp3/core/vpForwardProjection.h b/modules/core/include/visp3/core/vpForwardProjection.h index 3cf257c87b..97e98c2cf6 100644 --- a/modules/core/include/visp3/core/vpForwardProjection.h +++ b/modules/core/include/visp3/core/vpForwardProjection.h @@ -35,9 +35,9 @@ #define vpForwardProjection_H /*! 
- \file vpForwardProjection.h - \brief class that defines what is a generic geometric feature -*/ + * \file vpForwardProjection.h + * \brief class that defines what is a generic geometric feature + */ #include #include @@ -46,114 +46,107 @@ #include /*! - \class vpForwardProjection - \brief Class that defines what is a generic geometric feature. - - Each geometric feature has parameters expressed: - - - in the object frame \e oP. These parameters are located in the public - attribute vpForwardProjection::oP. - - in the camera frame \e cP. These parameters are located in the public - attribute vpTracker::cP. - - in the image plane \e p. These parameters are located in the public - attribute vpTracker::p. They correspond to normalized coordinates - of the feature expressed in meters. -*/ + * \class vpForwardProjection + * \brief Class that defines what is a generic geometric feature. + * + * Each geometric feature has parameters expressed: + * + * - in the object frame \e oP. These parameters are located in the public + * attribute vpForwardProjection::oP. + * - in the camera frame \e cP. These parameters are located in the public + * attribute vpTracker::cP. + * - in the image plane \e p. These parameters are located in the public + * attribute vpTracker::p. They correspond to normalized coordinates + * of the feature expressed in meters. + */ class VISP_EXPORT vpForwardProjection : public vpTracker { public: /*! - Used for memory issue especially in the vpServo class. - */ + * Used for memory issue especially in the vpServo class. + */ typedef enum { user, vpDisplayForwardProjection } vpForwardProjectionDeallocatorType; /** @name Public Member Functions Inherited from vpForwardProjection */ //@{ - vpForwardProjection() : oP(), deallocate(user) {} - - //! Destructor that does nothing. - virtual ~vpForwardProjection() { ; } + vpForwardProjection() : oP(), deallocate(user) { } /*! 
- - Computes the features parameters in the camera frame (\e cP) thanks - to the parameters given in the object frame - (vpForwardProjection::oP) and the homogeneous matrix relative to - the pose (\e cMo) between the object frame and the camera frame. - - To set the parameters in the object frame you need to call - setWorldCoordinates(). - - \param cMo : The homogeneous matrix corresponding to the pose - between the camera frame and the object frame. - - \param cP : The vector which will contain the feature parameters - expressed in the camera frame. - - With this method, the vpTracker::cP public attribute is not updated. - - */ + * Computes the features parameters in the camera frame (\e cP) thanks + * to the parameters given in the object frame + * (vpForwardProjection::oP) and the homogeneous matrix relative to + * the pose (\e cMo) between the object frame and the camera frame. + * + * To set the parameters in the object frame you need to call + * setWorldCoordinates(). + * + * \param cMo : The homogeneous matrix corresponding to the pose + * between the camera frame and the object frame. + * + * \param cP : The vector which will contain the feature parameters + * expressed in the camera frame. + * + * With this method, the vpTracker::cP public attribute is not updated. + */ virtual void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const = 0; - /*! - - Computes the features parameters in the camera frame (\e cP) thanks - to the parameters given in the object frame - (vpForwardProjection::oP) and the homogeneous matrix relative to - the pose (\e cMo) between the object frame and the camera frame. - To set the parameters in the object frame you need to call - setWorldCoordinates(). - - \param cMo : The homogeneous matrix corresponding to the pose - between the camera frame and the object frame. - - The features parameters in the camera frame (cP) are updated in - the vpTracker::cP public attribute. - */ + /*! 
+ * Computes the features parameters in the camera frame (\e cP) thanks + * to the parameters given in the object frame + * (vpForwardProjection::oP) and the homogeneous matrix relative to + * the pose (\e cMo) between the object frame and the camera frame. + * + * To set the parameters in the object frame you need to call + * setWorldCoordinates(). + * + * \param cMo : The homogeneous matrix corresponding to the pose + * between the camera frame and the object frame. + * + * The features parameters in the camera frame (cP) are updated in + * the vpTracker::cP public attribute. + */ virtual void changeFrame(const vpHomogeneousMatrix &cMo) = 0; /*! - - Displays the feature in the image \e I thanks to the 2D feature - parameters in the image plane (vpTracker::p) and the camera - parameters which enable to convert the features from meter to pixel. - - \param I : The image where the feature must be displayed in overlay. - - \param cam : The camera parameters to enable the conversion from - meter to pixel. - - \param color : The desired color to display the line in the image. - \param thickness : Thickness of the feature representation. - */ + * Displays the feature in the image \e I thanks to the 2D feature + * parameters in the image plane (vpTracker::p) and the camera + * parameters which enable to convert the features from meter to pixel. + * + * \param I : The image where the feature must be displayed in overlay. + * + * \param cam : The camera parameters to enable the conversion from + * meter to pixel. + * + * \param color : The desired color to display the line in the image. + * \param thickness : Thickness of the feature representation. + */ virtual void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1) = 0; /*! 
- - Displays the feature in the image \e I thanks to the features in - the object frame (vpForwardProjection::oP), the homogeneous matrix - relative to the pose between the object frame and the camera frame and the - camera parameters which enable to convert the features from meter - to pixel. - - \param I : The image where the line must be displayed in overlay. - - \param cMo : The homogeneous matrix corresponding to the pose - between the camera frame and the object frame. - - \param cam : The camera parameters to enable the conversion from - meter to pixel. - - \param color : The desired color to display the line in the image. - \param thickness : Thickness of the feature representation. + * Displays the feature in the image \e I thanks to the features in + * the object frame (vpForwardProjection::oP), the homogeneous matrix + * relative to the pose between the object frame and the camera frame and the + * camera parameters which enable to convert the features from meter + * to pixel. + * + * \param I : The image where the line must be displayed in overlay. + * + * \param cMo : The homogeneous matrix corresponding to the pose + * between the camera frame and the object frame. + * + * \param cam : The camera parameters to enable the conversion from + * meter to pixel. + * + * \param color : The desired color to display the line in the image. + * \param thickness : Thickness of the feature representation. */ virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1) = 0; /*! - Create an object with the same type. - */ + * Create an object with the same type. + */ virtual vpForwardProjection *duplicate() const = 0; //! Return object parameters expressed in the 3D object frame. @@ -164,25 +157,24 @@ class VISP_EXPORT vpForwardProjection : public vpTracker virtual void print() const; /*! 
- - Computes the feature parameters in the image plane from the - parameters expressed in the camera frame. - - \param cP [input] : Feature parameters expressed in the camera frame. - - \param p [output] : Feature parameters expressed in the image plane. - */ + * Computes the feature parameters in the image plane from the + * parameters expressed in the camera frame. + * + * \param cP [input] : Feature parameters expressed in the camera frame. + * + * \param p [output] : Feature parameters expressed in the image plane. + */ virtual void projection(const vpColVector &cP, vpColVector &p) const = 0; /*! - Computes the feature parameters in the image plane. These - parameters are than updated in the vpTracker::p public attribute. - - \warning To compute these parameters, the method exploit the - feature parameters in the camera frame. Thus, vpTracker::cP need - to be updated before the call of this method. For that, a call to - changeFrame(const vpHomogeneousMatrix &) is requested. - */ + * Computes the feature parameters in the image plane. These + * parameters are then updated in the vpTracker::p public attribute. + * + * \warning To compute these parameters, the method exploits the + * feature parameters in the camera frame. Thus, vpTracker::cP needs + * to be updated before the call of this method. For that, a call to + * changeFrame(const vpHomogeneousMatrix &) is requested. + */ virtual void projection() = 0; void project(); @@ -191,12 +183,11 @@ class VISP_EXPORT vpForwardProjection : public vpTracker void setDeallocate(vpForwardProjectionDeallocatorType d) { deallocate = d; } /*! - Sets the parameters which define the feature in the object frame. 
+ * + * \param oP : Feature parameters expressed in the object frame used + * to set the vpForwardProjection::oP public attribute. + */ virtual void setWorldCoordinates(const vpColVector &oP) = 0; void track(const vpHomogeneousMatrix &cMo); @@ -206,11 +197,11 @@ class VISP_EXPORT vpForwardProjection : public vpTracker /** @name Protected Member Functions Inherited from vpForwardProjection */ //@{ /*! - Default initialisation of the feature parameters: - - in the object frame: \e oP - - in the camera frame: \e cP - - in the image plane: \e p. - */ + * Default initialisation of the feature parameters: + * - in the object frame: \e oP + * - in the camera frame: \e cP + * - in the image plane: \e p. + */ virtual void init() = 0; //@} @@ -218,8 +209,8 @@ class VISP_EXPORT vpForwardProjection : public vpTracker /** @name Public Attributes Inherited from vpForwardProjection */ //@{ /*! - Feature coordinates expressed in the object frame. - */ + * Feature coordinates expressed in the object frame. + */ vpColVector oP; //@} diff --git a/modules/core/include/visp3/core/vpFrameGrabber.h b/modules/core/include/visp3/core/vpFrameGrabber.h index 0806476bd0..91ab6fd7c6 100644 --- a/modules/core/include/visp3/core/vpFrameGrabber.h +++ b/modules/core/include/visp3/core/vpFrameGrabber.h @@ -38,61 +38,58 @@ #include /*! - \file vpFrameGrabber.h - \brief Base class for all video devices. It is - designed to provide a generic front end to video sources. -*/ + * \file vpFrameGrabber.h + * \brief Base class for all video devices. It is + * designed to provide a generic front end to video sources. + */ /*! - \class vpFrameGrabber - - \brief Base class for all video devices. It is designed to provide a front - end to video sources. - - This class should provide a virtual function that allows the acquisition - of an image. - - The example below shows how to use this class. 
- \code -#include -#include -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) - vpImage I; - vpFrameGrabber *g; // Generic framegrabber - -#if defined( VISP_HAVE_DC1394 ) - vp1394TwoGrabber *g_1394_2 = new vp1394TwoGrabber; - // specific settings for firewire grabber based on libdc1394-2.x version - g_1394_2->setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_320x240_YUV422); - g_1394_2->setFramerate(vp1394TwoGrabber::vpFRAMERATE_30); - g = g_1394_2; -#elif defined( VISP_HAVE_V4L2 ) - vpV4l2Grabber *g_v4l2 = new vpV4l2Grabber; - // specific settings for Video For Linux Two grabber - g_v4l2->setInput(2); // Input 2 on the board - g_v4l2->setFramerate(vpV4l2Grabber::framerate_50fps); // 50 fps - g_v4l2->setWidth(384); // Acquired images are 768 width - g_v4l2->setHeight(288); // Acquired images are 576 height - g_v4l2->setNBuffers(3); // 3 ring buffers to ensure real-time acquisition - g = g_v4l2; -#endif - - g->open(I); // Open the framegrabber - g->acquire(I); // Acquire an image - vpImageIo::write(I, "image.pgm"); // Write image on the disk -#endif -} - \endcode - - - \author Eric Marchand (Eric.Marchand@irisa.fr), Irisa / Inria Rennes -*/ + * \class vpFrameGrabber + * + * \brief Base class for all video devices. It is designed to provide a front + * end to video sources. + * + * This class should provide a virtual function that allows the acquisition + * of an image. + * + * The example below shows how to use this class. 
+ * \code + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) + * vpImage I; + * vpFrameGrabber *g; // Generic framegrabber + * + * #if defined( VISP_HAVE_DC1394 ) + * vp1394TwoGrabber *g_1394_2 = new vp1394TwoGrabber; + * // specific settings for firewire grabber based on libdc1394-2.x version + * g_1394_2->setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_320x240_YUV422); + * g_1394_2->setFramerate(vp1394TwoGrabber::vpFRAMERATE_30); + * g = g_1394_2; + * #elif defined( VISP_HAVE_V4L2 ) + * vpV4l2Grabber *g_v4l2 = new vpV4l2Grabber; + * // specific settings for Video For Linux Two grabber + * g_v4l2->setInput(2); // Input 2 on the board + * g_v4l2->setFramerate(vpV4l2Grabber::framerate_50fps); // 50 fps + * g_v4l2->setWidth(384); // Acquired images are 768 width + * g_v4l2->setHeight(288); // Acquired images are 576 height + * g_v4l2->setNBuffers(3); // 3 ring buffers to ensure real-time acquisition + * g = g_v4l2; + * #endif + * + * g->open(I); // Open the framegrabber + * g->acquire(I); // Acquire an image + * vpImageIo::write(I, "image.pgm"); // Write image on the disk + * #endif + * } + * \endcode + */ class VISP_EXPORT vpFrameGrabber { public: @@ -112,8 +109,7 @@ class VISP_EXPORT vpFrameGrabber //@} public: - vpFrameGrabber() : init(false), height(0), width(0){}; - virtual ~vpFrameGrabber() { ; } + vpFrameGrabber() : init(false), height(0), width(0) { }; virtual void open(vpImage &I) = 0; virtual void open(vpImage &I) = 0; @@ -122,9 +118,9 @@ class VISP_EXPORT vpFrameGrabber virtual void acquire(vpImage &I) = 0; /*! 
- This virtual function is used to de-allocate - the memory used by a specific frame grabber - */ + * This virtual function is used to de-allocate + * the memory used by a specific frame grabber + */ virtual void close() = 0; }; diff --git a/modules/core/include/visp3/core/vpHistogramValey.h b/modules/core/include/visp3/core/vpHistogramValey.h index d4ab3f463c..d0a753b17e 100644 --- a/modules/core/include/visp3/core/vpHistogramValey.h +++ b/modules/core/include/visp3/core/vpHistogramValey.h @@ -58,14 +58,11 @@ class VISP_EXPORT vpHistogramValey : vpHistogramPeak { public: - vpHistogramValey() : vpHistogramPeak(){}; + vpHistogramValey() : vpHistogramPeak() { }; - vpHistogramValey(unsigned char lvl, unsigned val) : vpHistogramPeak(lvl, val){}; + vpHistogramValey(unsigned char lvl, unsigned val) : vpHistogramPeak(lvl, val) { }; - vpHistogramValey(const vpHistogramValey &v) : vpHistogramPeak(v){}; - - /*! Destructor that does nothing. */ - virtual ~vpHistogramValey() {} + vpHistogramValey(const vpHistogramValey &v) : vpHistogramPeak(v) { }; vpHistogramValey &operator=(const vpHistogramValey &v); bool operator==(const vpHistogramValey &v) const; diff --git a/modules/core/include/visp3/core/vpHomogeneousMatrix.h b/modules/core/include/visp3/core/vpHomogeneousMatrix.h index 78fe4321c3..6e4291f992 100644 --- a/modules/core/include/visp3/core/vpHomogeneousMatrix.h +++ b/modules/core/include/visp3/core/vpHomogeneousMatrix.h @@ -212,10 +212,6 @@ class VISP_EXPORT vpHomogeneousMatrix : public vpArray2D #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpHomogeneousMatrix(const std::initializer_list &list); #endif - /*! - Destructor. 
- */ - virtual ~vpHomogeneousMatrix() { } void buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R); void buildFrom(const vpTranslationVector &t, const vpThetaUVector &tu); diff --git a/modules/core/include/visp3/core/vpLine.h b/modules/core/include/visp3/core/vpLine.h index 83ae2471b2..6b73e68511 100644 --- a/modules/core/include/visp3/core/vpLine.h +++ b/modules/core/include/visp3/core/vpLine.h @@ -35,9 +35,9 @@ #define vpLine_H /*! - \file vpLine.h - \brief class that defines what is a line -*/ + * \file vpLine.h + * \brief class that defines what is a line + */ #include #include @@ -45,120 +45,117 @@ #include /*! - \class vpLine - \ingroup group_core_geometry - - \brief Class that defines a 3D line in the object frame and allows forward projection - of the line in the camera frame and in the 2D image plane by perspective projection. - All the parameters must be set in meter. - - Note that a 3D line is defined from the intersection between two 3D planes. - - A 3D line has the followings parameters: - - **in the 3D object frame**: parameters are located in vpForwardProjection::oP 8-dim internal vector. They correspond - to the parameters oA1, oB1, oC1, oD1 and oA2, oB2, oC2, oD2 defining the equations of the two planes. - Each point \f$ (X, Y, Z) \f$ which belongs to the 3D line is a solution of those two - equations: - \f[ oA1*X + oB1*Y + oC1*Z + oD1 = 0 \f] - \f[ oA2*X + oB2*Y + oC2*Z + oD2 = 0 \f] - To update these line parameters you may use setWorldCoordinates(). To get theses parameters use get_oP(). - - - **in the 3D camera frame**: parameters are saved in vpTracker::cP 8-dim internal vector. They correspond - to the parameters cA1, cB1, cC1, cD1 and cA2, cB2, cC2, cD2 defining the equations of the two planes. 
- Each point \f$ (X, Y, Z) \f$ which belongs to the 3D line is a solution of those two - equations: - \f[ cA1*X + cB1*Y + cC1*Z + cD1 = 0 \f] - \f[ cA2*X + cB2*Y + cC2*Z + cD2 = 0 \f] - It is easily possible to compute these parameters thanks to the corresponding 3D parameters oP in the - object frame. But you have to note that four constraints are added in the planes equations. - \f[ cD1 = 0 \f] - \f[ cD2 > 0 \f] - \f[ cA1*cA2 + cB1*cB2 + cC1*cC2 = 0 \f] - \f[ || cA2 || = 1 \f] - To compute these parameters you may use changeFrame(). To get these parameters use get_cP(). - - - **in the 2D image plane**: parameters are saved in vpTracker::p 2-dim vector. They correspond - to the parameters (\f$\rho\f$, \f$\theta\f$). These - 2D parameters are obtained from the perspective projection of the 3D line parameters expressed - in the camera frame. They are defined thanks to the 2D equation of a line. - \f[ x \; cos(\theta) + y \; sin(\theta) -\rho = 0 \f] Here \f$ x - \f$ and \f$ y \f$ are the coordinates of a point belonging to the - line in the image plane while \f$ \rho \f$ and \f$ \theta \f$ are - the parameters used to define the line. The value of \f$ \theta - \f$ is between \f$ -\pi/2 \f$ and \f$ \pi/2 \f$ and the value of - \f$ \rho \f$ can be positive or negative. The conventions used to - choose the sign of \f$ \rho \f$ and the value of \f$ \theta \f$ - are illustrated by the following image. - \image html vpFeatureLine.gif - \image latex vpFeatureLine.ps width=10cm - The line parameters corresponding to the image frame are located - in the vpTracker::p public attribute, where \e p is a vector defined - as: \f[ p = \left[\begin{array}{c} \rho \\ \theta \end{array}\right] \f] - To compute these parameters use projection(). To get the corresponding values use get_p(). 
-*/ + * \class vpLine + * \ingroup group_core_geometry + * + * \brief Class that defines a 3D line in the object frame and allows forward projection + * of the line in the camera frame and in the 2D image plane by perspective projection. + * All the parameters must be set in meter. + * + * Note that a 3D line is defined from the intersection between two 3D planes. + * + * A 3D line has the followings parameters: + * - **in the 3D object frame**: parameters are located in vpForwardProjection::oP 8-dim internal vector. They correspond + * to the parameters oA1, oB1, oC1, oD1 and oA2, oB2, oC2, oD2 defining the equations of the two planes. + * Each point \f$ (X, Y, Z) \f$ which belongs to the 3D line is a solution of those two + * equations: + * \f[ oA1*X + oB1*Y + oC1*Z + oD1 = 0 \f] + * \f[ oA2*X + oB2*Y + oC2*Z + oD2 = 0 \f] + * To update these line parameters you may use setWorldCoordinates(). To get theses parameters use get_oP(). + * + * - **in the 3D camera frame**: parameters are saved in vpTracker::cP 8-dim internal vector. They correspond + * to the parameters cA1, cB1, cC1, cD1 and cA2, cB2, cC2, cD2 defining the equations of the two planes. + * Each point \f$ (X, Y, Z) \f$ which belongs to the 3D line is a solution of those two + * equations: + * \f[ cA1*X + cB1*Y + cC1*Z + cD1 = 0 \f] + * \f[ cA2*X + cB2*Y + cC2*Z + cD2 = 0 \f] + * It is easily possible to compute these parameters thanks to the corresponding 3D parameters oP in the + * object frame. But you have to note that four constraints are added in the planes equations. + * \f[ cD1 = 0 \f] + * \f[ cD2 > 0 \f] + * \f[ cA1*cA2 + cB1*cB2 + cC1*cC2 = 0 \f] + * \f[ || cA2 || = 1 \f] + * To compute these parameters you may use changeFrame(). To get these parameters use get_cP(). + * + * - **in the 2D image plane**: parameters are saved in vpTracker::p 2-dim vector. They correspond + * to the parameters (\f$\rho\f$, \f$\theta\f$). 
These + * 2D parameters are obtained from the perspective projection of the 3D line parameters expressed + * in the camera frame. They are defined thanks to the 2D equation of a line. + * \f[ x \; cos(\theta) + y \; sin(\theta) -\rho = 0 \f] Here \f$ x + * \f$ and \f$ y \f$ are the coordinates of a point belonging to the + * line in the image plane while \f$ \rho \f$ and \f$ \theta \f$ are + * the parameters used to define the line. The value of \f$ \theta + * \f$ is between \f$ -\pi/2 \f$ and \f$ \pi/2 \f$ and the value of + * \f$ \rho \f$ can be positive or negative. The conventions used to + * choose the sign of \f$ \rho \f$ and the value of \f$ \theta \f$ + * are illustrated by the following image. + * \image html vpFeatureLine.gif + * \image latex vpFeatureLine.ps width=10cm + * + * The line parameters corresponding to the image frame are located + * in the vpTracker::p public attribute, where \e p is a vector defined + * as: \f[ p = \left[\begin{array}{c} \rho \\ \theta \end{array}\right] \f] + * To compute these parameters use projection(). To get the corresponding values use get_p(). + */ class VISP_EXPORT vpLine : public vpForwardProjection { public: vpLine(); - //! 
Destructor - virtual ~vpLine() { ; } - void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpLine *duplicate() const; + vpLine *duplicate() const override; /*! - Gets the \f$ \rho \f$ value corresponding to one of the - two parameters used to define the line parametrization in the - image plane. - - \return Returns the current value of \f$ \rho \f$. - - \sa getTheta() - */ + * Gets the \f$ \rho \f$ value corresponding to one of the + * two parameters used to define the line parametrization in the + * image plane. + * + * \return Returns the current value of \f$ \rho \f$. + * + * \sa getTheta() + */ double getRho() const { return p[0]; } /*! - - Gets the \f$ \theta \f$ angle value corresponding to one of the - two parameters used to define the line parametrization in the - image plane. - - \return Returns the current value of \f$ \theta \f$. - - \sa getRho() - */ + * Gets the \f$ \theta \f$ angle value corresponding to one of the + * two parameters used to define the line parametrization in the + * image plane. 
+ * + * \return Returns the current value of \f$ \theta \f$. + * + * \sa getRho() + */ double getTheta() const { return p[1]; } /*! - - Sets the \f$ \rho \f$ parameter used to define the line in the - image plane. - - \param rho : The desired value for \f$ \rho \f$. - - \sa setTheta() - */ + * Sets the \f$ \rho \f$ parameter used to define the line in the + * image plane. + * + * \param rho : The desired value for \f$ \rho \f$. + * + * \sa setTheta() + */ void setRho(double rho) { p[0] = rho; } /*! - Sets the \f$ \theta \f$ angle value used to define the line in the - image plane. - - \param theta : The desired value for \f$ \theta \f$ angle. - - \sa setRho() - */ + * Sets the \f$ \theta \f$ angle value used to define the line in the + * image plane. + * + * \param theta : The desired value for \f$ \theta \f$ angle. + * + * \sa setRho() + */ void setTheta(double theta) { p[1] = theta; } void setWorldCoordinates(const double &oA1, const double &oB1, const double &oC1, const double &oD1, @@ -166,19 +163,13 @@ class VISP_EXPORT vpLine : public vpForwardProjection void setWorldCoordinates(const vpColVector &oP1, const vpColVector &oP2); - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; - void projection(); - void projection(const vpColVector &cP, vpColVector &p) const; + void projection() override; + void projection(const vpColVector &cP, vpColVector &p) const override; protected: - void init(); + void init() override; }; #endif - -/* - * Local variables: - * c-basic-offset: 2 - * End: - */ diff --git a/modules/core/include/visp3/core/vpLinearKalmanFilterInstantiation.h b/modules/core/include/visp3/core/vpLinearKalmanFilterInstantiation.h index 54e39faa6c..d6d7b207ac 100644 --- a/modules/core/include/visp3/core/vpLinearKalmanFilterInstantiation.h +++ b/modules/core/include/visp3/core/vpLinearKalmanFilterInstantiation.h @@ -55,11 +55,12 @@ class VISP_EXPORT vpLinearKalmanFilterInstantiation : public 
vpKalmanFilter /*! Selector used to set the Kalman filter state model. */ - typedef enum { - /*! Consider the state as a constant velocity model with white - noise. Measures available are the successive positions of the - target. To know more about this state model, see - initStateConstVel_MeasurePos(). */ + typedef enum + { +/*! Consider the state as a constant velocity model with white + noise. Measures available are the successive positions of the + target. To know more about this state model, see + initStateConstVel_MeasurePos(). */ stateConstVel_MeasurePos, /*! Consider the state as a constant velocity model with colored noise measurements as acceleration terms. Measured available are the @@ -81,10 +82,8 @@ class VISP_EXPORT vpLinearKalmanFilterInstantiation : public vpKalmanFilter By default the state model is unknown and set to vpLinearKalmanFilterInstantiation::unknown. */ - vpLinearKalmanFilterInstantiation() : model(unknown){}; + vpLinearKalmanFilterInstantiation() : model(unknown) { }; - /*! Destructor that does nothng. */ - virtual ~vpLinearKalmanFilterInstantiation(){}; /*! Return the current state model. 
*/ diff --git a/modules/core/include/visp3/core/vpMath.h b/modules/core/include/visp3/core/vpMath.h index f082ab5d33..237b2decfb 100644 --- a/modules/core/include/visp3/core/vpMath.h +++ b/modules/core/include/visp3/core/vpMath.h @@ -218,14 +218,14 @@ class VISP_EXPORT vpMath static double lineFitting(const std::vector &imPts, double &a, double &b, double &c); - template static inline _Tp saturate(unsigned char v) { return _Tp(v); } - template static inline _Tp saturate(char v) { return _Tp(v); } - template static inline _Tp saturate(unsigned short v) { return _Tp(v); } - template static inline _Tp saturate(short v) { return _Tp(v); } - template static inline _Tp saturate(unsigned v) { return _Tp(v); } - template static inline _Tp saturate(int v) { return _Tp(v); } - template static inline _Tp saturate(float v) { return _Tp(v); } - template static inline _Tp saturate(double v) { return _Tp(v); } + template static inline Tp saturate(unsigned char v) { return Tp(v); } + template static inline Tp saturate(char v) { return Tp(v); } + template static inline Tp saturate(unsigned short v) { return Tp(v); } + template static inline Tp saturate(short v) { return Tp(v); } + template static inline Tp saturate(unsigned v) { return Tp(v); } + template static inline Tp saturate(int v) { return Tp(v); } + template static inline Tp saturate(float v) { return Tp(v); } + template static inline Tp saturate(double v) { return Tp(v); } static double getMean(const std::vector &v); static double getMedian(const std::vector &v); @@ -323,13 +323,13 @@ long double vpMath::comb(unsigned int n, unsigned int p) int vpMath::round(double x) { #if defined(VISP_HAVE_FUNC_STD_ROUND) - return (int)std::round(x); + return static_cast(std::round(x)); #elif defined(VISP_HAVE_FUNC_ROUND) //:: to design the global namespace and avoid to call recursively // vpMath::round - return (int)::round(x); + return static_cast(::round(x)); #else - return (x > 0.0) ? 
((int)floor(x + 0.5)) : ((int)ceil(x - 0.5)); + return (x > 0.0) ? (static_cast(floor(x + 0.5))) : (static_cast(ceil(x - 0.5))); #endif } @@ -408,26 +408,29 @@ template <> inline unsigned char vpMath::saturate(char v) // On little endian arch, CHAR_MIN=-127 and CHAR_MAX=128 leading to // (int)(char -127) = -127. if (std::numeric_limits::is_signed) - return (unsigned char)(((std::max))((int)v, 0)); + return static_cast(std::max(static_cast(v), 0)); else - return (unsigned char)((unsigned int)v > SCHAR_MAX ? 0 : v); + return static_cast(static_cast(v) > SCHAR_MAX ? 0 : v); } template <> inline unsigned char vpMath::saturate(unsigned short v) { - return (unsigned char)((std::min))((unsigned int)v, (unsigned int)UCHAR_MAX); + return static_cast(std::min(static_cast(v), static_cast(UCHAR_MAX))); } template <> inline unsigned char vpMath::saturate(int v) { - return (unsigned char)((unsigned int)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); + return static_cast(static_cast(v) <= UCHAR_MAX ? v : v > 0 ? 
UCHAR_MAX : 0); } -template <> inline unsigned char vpMath::saturate(short v) { return saturate((int)v); } +template <> inline unsigned char vpMath::saturate(short v) +{ + return saturate(static_cast(v)); +} template <> inline unsigned char vpMath::saturate(unsigned int v) { - return (unsigned char)((std::min))(v, (unsigned int)UCHAR_MAX); + return static_cast(std::min(v, static_cast(UCHAR_MAX))); } template <> inline unsigned char vpMath::saturate(float v) @@ -443,23 +446,29 @@ template <> inline unsigned char vpMath::saturate(double v) } // char -template <> inline char vpMath::saturate(unsigned char v) { return (char)((std::min))((int)v, SCHAR_MAX); } +template <> inline char vpMath::saturate(unsigned char v) +{ + return static_cast(std::min(static_cast(v), SCHAR_MAX)); +} template <> inline char vpMath::saturate(unsigned short v) { - return (char)((std::min))((unsigned int)v, (unsigned int)SCHAR_MAX); + return static_cast(std::min(static_cast(v), static_cast(SCHAR_MAX))); } template <> inline char vpMath::saturate(int v) { - return (char)((unsigned int)(v - SCHAR_MIN) <= (unsigned int)UCHAR_MAX ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN); + return static_cast(static_cast(v - SCHAR_MIN) <= static_cast(UCHAR_MAX) ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN); } -template <> inline char vpMath::saturate(short v) { return saturate((int)v); } +template <> inline char vpMath::saturate(short v) +{ + return saturate((int)v); +} template <> inline char vpMath::saturate(unsigned int v) { - return (char)((std::min))(v, (unsigned int)SCHAR_MAX); + return static_cast(std::min(v, static_cast(SCHAR_MAX))); } template <> inline char vpMath::saturate(float v) @@ -483,24 +492,24 @@ template <> inline unsigned short vpMath::saturate(char v) // On little endian arch, CHAR_MIN=-127 and CHAR_MAX=128 leading to // (int)(char -127) = -127. 
if (std::numeric_limits::is_signed) - return (unsigned char)(((std::max))((int)v, 0)); + return static_cast(std::max(static_cast(v), 0)); else - return (unsigned char)((unsigned int)v > SCHAR_MAX ? 0 : v); + return static_cast(static_cast(v) > SCHAR_MAX ? 0 : v); } template <> inline unsigned short vpMath::saturate(short v) { - return (unsigned short)((std::max))((int)v, 0); + return static_cast(std::max(static_cast(v), 0)); } template <> inline unsigned short vpMath::saturate(int v) { - return (unsigned short)((unsigned int)v <= (unsigned int)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); + return static_cast(static_cast(v) <= static_cast(USHRT_MAX) ? v : v > 0 ? USHRT_MAX : 0); } template <> inline unsigned short vpMath::saturate(unsigned int v) { - return (unsigned short)((std::min))(v, (unsigned int)USHRT_MAX); + return static_cast(std::min(v, static_cast(USHRT_MAX))); } template <> inline unsigned short vpMath::saturate(float v) @@ -516,14 +525,17 @@ template <> inline unsigned short vpMath::saturate(double v) } // short -template <> inline short vpMath::saturate(unsigned short v) { return (short)((std::min))((int)v, SHRT_MAX); } +template <> inline short vpMath::saturate(unsigned short v) +{ + return static_cast(std::min(static_cast(v), SHRT_MAX)); +} template <> inline short vpMath::saturate(int v) { - return (short)((unsigned int)(v - SHRT_MIN) <= (unsigned int)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN); + return static_cast(static_cast(v - SHRT_MIN) <= static_cast(USHRT_MAX) ? v : v > 0 ? 
SHRT_MAX : SHRT_MIN); } template <> inline short vpMath::saturate(unsigned int v) { - return (short)((std::min))(v, (unsigned int)SHRT_MAX); + return static_cast(std::min(v, static_cast(SHRT_MAX))); } template <> inline short vpMath::saturate(float v) { @@ -537,15 +549,27 @@ template <> inline short vpMath::saturate(double v) } // int -template <> inline int vpMath::saturate(float v) { return vpMath::round(v); } +template <> inline int vpMath::saturate(float v) +{ + return vpMath::round(v); +} -template <> inline int vpMath::saturate(double v) { return vpMath::round(v); } +template <> inline int vpMath::saturate(double v) +{ + return vpMath::round(v); +} // unsigned int // (Comment from OpenCV) we intentionally do not clip negative numbers, to // make -1 become 0xffffffff etc. -template <> inline unsigned int vpMath::saturate(float v) { return (unsigned int)vpMath::round(v); } +template <> inline unsigned int vpMath::saturate(float v) +{ + return static_cast(vpMath::round(v)); +} -template <> inline unsigned int vpMath::saturate(double v) { return (unsigned int)vpMath::round(v); } +template <> inline unsigned int vpMath::saturate(double v) +{ + return static_cast(vpMath::round(v)); +} #endif diff --git a/modules/core/include/visp3/core/vpMatrix.h b/modules/core/include/visp3/core/vpMatrix.h index a1f28fd036..f1dac9443c 100644 --- a/modules/core/include/visp3/core/vpMatrix.h +++ b/modules/core/include/visp3/core/vpMatrix.h @@ -208,9 +208,6 @@ vpMatrix M(R); explicit vpMatrix(const std::initializer_list > &lists); #endif - //! Destructor (Memory de-allocation) - virtual ~vpMatrix() { } - /*! Removes all elements from the matrix (which are destroyed), leaving the container with a size of 0. 
diff --git a/modules/core/include/visp3/core/vpMatrixException.h b/modules/core/include/visp3/core/vpMatrixException.h index c38ee9a49a..edc9e136a1 100644 --- a/modules/core/include/visp3/core/vpMatrixException.h +++ b/modules/core/include/visp3/core/vpMatrixException.h @@ -54,7 +54,7 @@ class VISP_EXPORT vpMatrixException : public vpException */ enum errorCodeEnum { -//! Error returns by a constructor + //! Error returns by a constructor constructionError, //! Something is not initialized notInitializedError, diff --git a/modules/core/include/visp3/core/vpMeterPixelConversion.h b/modules/core/include/visp3/core/vpMeterPixelConversion.h index 29c3f969ed..0334b69d39 100644 --- a/modules/core/include/visp3/core/vpMeterPixelConversion.h +++ b/modules/core/include/visp3/core/vpMeterPixelConversion.h @@ -104,7 +104,7 @@ class VISP_EXPORT vpMeterPixelConversion */ inline static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v) { - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: convertPointWithoutDistortion(cam, x, y, u, v); break; @@ -146,7 +146,7 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, vpImagePoint &iP) { - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: convertPointWithoutDistortion(cam, x, y, iP); break; @@ -172,8 +172,8 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPointWithoutDistortion(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v) { - u = x * cam.px + cam.u0; - v = y * cam.py + cam.v0; + u = x * cam.m_px + cam.m_u0; + v = y * cam.m_py + cam.m_v0; } /*! 
@@ -189,8 +189,8 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPointWithoutDistortion(const vpCameraParameters &cam, const double &x, const double &y, vpImagePoint &iP) { - iP.set_u(x * cam.px + cam.u0); - iP.set_v(y * cam.py + cam.v0); + iP.set_u(x * cam.m_px + cam.m_u0); + iP.set_v(y * cam.m_py + cam.m_v0); } /*! @@ -212,9 +212,9 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPointWithDistortion(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v) { - double r2 = 1. + cam.kud * (x * x + y * y); - u = cam.u0 + cam.px * x * r2; - v = cam.v0 + cam.py * y * r2; + double r2 = 1. + cam.m_kud * (x * x + y * y); + u = cam.m_u0 + cam.m_px * x * r2; + v = cam.m_v0 + cam.m_py * y * r2; } /*! @@ -236,9 +236,9 @@ class VISP_EXPORT vpMeterPixelConversion inline static void convertPointWithDistortion(const vpCameraParameters &cam, const double &x, const double &y, vpImagePoint &iP) { - double r2 = 1. + cam.kud * (x * x + y * y); - iP.set_u(cam.u0 + cam.px * x * r2); - iP.set_v(cam.v0 + cam.py * y * r2); + double r2 = 1. + cam.m_kud * (x * x + y * y); + iP.set_u(cam.m_u0 + cam.m_px * x * r2); + iP.set_v(cam.m_v0 + cam.m_py * y * r2); } /*! @@ -272,7 +272,7 @@ class VISP_EXPORT vpMeterPixelConversion std::vector k = cam.getKannalaBrandtDistortionCoefficients(); double theta2 = theta * theta, theta3 = theta2 * theta, theta4 = theta2 * theta2, theta5 = theta4 * theta, - theta6 = theta3 * theta3, theta7 = theta6 * theta, theta8 = theta4 * theta4, theta9 = theta8 * theta; + theta6 = theta3 * theta3, theta7 = theta6 * theta, theta8 = theta4 * theta4, theta9 = theta8 * theta; double r_d = theta + k[0] * theta3 + k[1] * theta5 + k[2] * theta7 + k[3] * theta9; @@ -281,8 +281,8 @@ class VISP_EXPORT vpMeterPixelConversion double x_d = x * scale; double y_d = y * scale; - u = cam.px * x_d + cam.u0; - v = cam.py * y_d + cam.v0; + u = cam.m_px * x_d + cam.m_u0; + v = cam.m_py * y_d + cam.m_v0; } /*! 
@@ -315,7 +315,7 @@ class VISP_EXPORT vpMeterPixelConversion std::vector k = cam.getKannalaBrandtDistortionCoefficients(); double theta2 = theta * theta, theta3 = theta2 * theta, theta4 = theta2 * theta2, theta5 = theta4 * theta, - theta6 = theta3 * theta3, theta7 = theta6 * theta, theta8 = theta4 * theta4, theta9 = theta8 * theta; + theta6 = theta3 * theta3, theta7 = theta6 * theta, theta8 = theta4 * theta4, theta9 = theta8 * theta; double r_d = theta + k[0] * theta3 + k[1] * theta5 + k[2] * theta7 + k[3] * theta9; @@ -324,8 +324,8 @@ class VISP_EXPORT vpMeterPixelConversion double x_d = x * scale; double y_d = y * scale; - iP.set_u(cam.px * x_d + cam.u0); - iP.set_v(cam.py * y_d + cam.v0); + iP.set_u(cam.m_px * x_d + cam.m_u0); + iP.set_v(cam.m_py * y_d + cam.m_v0); } #endif // #ifndef DOXYGEN_SHOULD_SKIP_THIS diff --git a/modules/core/include/visp3/core/vpMomentAlpha.h b/modules/core/include/visp3/core/vpMomentAlpha.h index 6c52aeed00..56eada8a9c 100644 --- a/modules/core/include/visp3/core/vpMomentAlpha.h +++ b/modules/core/include/visp3/core/vpMomentAlpha.h @@ -42,163 +42,163 @@ #include /*! - \class vpMomentAlpha - - \ingroup group_core_moments - - \brief This class defines the orientation of the object inside the plane - parallel to the object. - - In general the value of the moment is computed in \f$ [-\pi/2 ; \pi/2] \f$ - interval by the formula \f$ \alpha = \frac{1}{2} - \mathrm{atan2}(2\mu_{11}, \mu_{20}-\mu_{02}) \f$. - - To obtain a \f$ [-\pi ; \pi] \f$ precision for non symmetric object, you - have to specify a reference information. This reference information is an - alpha computed using the previous formula in \f$ [-\pi/2 ; \pi/2] \f$. - Obtaining this precision comes from third-order centered moments and this - reference information. 
- - Therefore there are two modes for vpMomentAlpha and one constructor per - mode: - - Reference mode using the empty constructor vpMomentAlpha(): - The vpMomentAlpha doesn't need any additionnal information, it will compute - its values from available moments in \f$ [-\pi/2 ; \pi/2] \f$. - - Relative mode using non-empty constructor - vpMomentAlpha(std::vector&, double): The vpMomentAlpha is computed in - \f$ [-\pi ; \pi] \f$ from the available moments and the reference - information. By knowing the reference, it may distinguish in-plane rotations - of \f$ \alpha \f$ from rotations of \f$ \alpha + \pi \f$. - - The following code demonstrates a calculation of a reference alpha and then - uses this alpha to estimate the orientation of the same object after - performing a 180 degrees rotation. Therefore the first and second alpha should - have opposite values. - - \code -#include -#include -#include -#include -#include -#include - -//generic function for printing -void print (double i) { std::cout << i << "\t";} - -int main() -{ - vpPoint p; - std::vector vec_p; // Vector that contains the vertices of the contour polygon - p.set_x(1); p.set_y(1); // Coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // Coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(-3); p.set_y(0); // Coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(-3); p.set_y(-1); // Coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - vpMomentObject objRef(3); // Reference object. 
Must be of order 3 because we will - // need the 3rd order centered moments - - objRef.setType(vpMomentObject::DENSE_POLYGON); // Object is the inner part of a polygon - objRef.fromVector(vec_p); // Init the dense object with the polygon - - vpMomentDatabase dbRef; // Reference database - vpMomentGravityCenter gRef; // Declaration of gravity center - vpMomentCentered mcRef; // Centered moments - vpMomentAlpha alphaRef; // Declare alpha as reference - - gRef.linkTo(dbRef); // Add gravity center to database - mcRef.linkTo(dbRef); // Add centered moments - alphaRef.linkTo(dbRef); // Add alpha depending on centered moments - - dbRef.updateAll(objRef); // All of the moments must be updated, not just alpha - - gRef.compute(); // Compute the moment - mcRef.compute(); // Compute centered moments AFTER gravity center - alphaRef.compute(); // Compute alpha AFTER centered moments. - - // The order of values in the vector must be as follows: mu30 mu21 mu12 mu03 - std::vector mu3ref = {mcRef.get(3,0), mcRef.get(2,1), mcRef.get(1,2), mcRef.get(0,3)}; - - std::cout << "--- Reference object ---" << std::endl; - std::cout << "alphaRef=" << vpMath::deg(alphaRef.get()) << " deg" << std::endl << "mu3="; // print reference alpha - std::for_each (mu3ref.begin(), mu3ref.end(), print); - std::cout << std::endl; - - ////////////CURRENT VALUES (same object rotated 180deg - must be - ////////////entered in reverse order)//////////////// - vec_p.clear(); - - p.set_x(-3); p.set_y(1); // Coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - p.set_x(-3); p.set_y(0); // Coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(2); p.set_y(-2); // Coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(1); p.set_y(-1); // Coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - - vpMomentObject obj(3); // Second object. 
Order 3 is also required because of the Alpha - // will compare third-order centered moments to given reference. - - obj.setType(vpMomentObject::DENSE_POLYGON); // Object is the inner part of a polygon - obj.fromVector(vec_p); // Init the dense object with the polygon - - vpMomentDatabase db; // Database - vpMomentGravityCenter g; // Declaration of gravity center - vpMomentCentered mc; // mc contains centered moments - vpMomentAlpha alpha(mu3ref, alphaRef.get()); // Declare alpha as relative to a reference - - g.linkTo(db); // Add gravity center to database - mc.linkTo(db); // Add centered moments - alpha.linkTo(db); // Add alpha depending on centered moments - - db.updateAll(obj); // All of the moments must be updated - - g.compute(); // Compute the moment - mc.compute(); // Compute centered moments AFTER gravity center - alpha.compute(); // Compute alpha AFTER centered moments. - - std::cout << "--- current object ---" << std::endl; - std::cout << "alpha=" << vpMath::deg(alpha.get()) << " deg" << std::endl; - - return 0; -} - \endcode -This program outputs: -\code ---- Reference object --- -alphaRef=25.3019 deg -mu3=1.80552 0.921882 0.385828 0.122449 ---- current object --- -alpha=-25.3019 deg -\endcode - - There is also testMomentAlpha.cpp example that shows how to compute alpha in the range \f$ [-\pi ; \pi] \f$ - using arrow images as input. The code is given below: - \include testMomentAlpha.cpp - - From the first image we compute the 3rd order centered moments and the value of the reference alpha - that is than used to compute the alpha moment in the range \f$ [-\pi ; \pi] \f$. 
Running this example you will get: - \code -alpha expected 0 computed -0.128108 deg -alpha expected 45 computed 44.8881 deg -alpha expected 90 computed 89.8719 deg -alpha expected 135 computed 134.888 deg -alpha expected 180 computed 179.872 deg -alpha expected -135 computed -135.112 deg -alpha expected -90 computed -90.1281 deg -alpha expected -45 computed -45.1119 deg - \endcode - - Shortcuts for quickly getting those references exist in vpMomentCommon. - - This moment depends on vpMomentCentered. -*/ + * \class vpMomentAlpha + * + * \ingroup group_core_moments + * + * \brief This class defines the orientation of the object inside the plane + * parallel to the object. + * + * In general the value of the moment is computed in \f$ [-\pi/2 ; \pi/2] \f$ + * interval by the formula \f$ \alpha = \frac{1}{2} + * \mathrm{atan2}(2\mu_{11}, \mu_{20}-\mu_{02}) \f$. + * + * To obtain a \f$ [-\pi ; \pi] \f$ precision for non symmetric object, you + * have to specify a reference information. This reference information is an + * alpha computed using the previous formula in \f$ [-\pi/2 ; \pi/2] \f$. + * Obtaining this precision comes from third-order centered moments and this + * reference information. + * + * Therefore there are two modes for vpMomentAlpha and one constructor per + * mode: + * - Reference mode using the empty constructor vpMomentAlpha(): + * The vpMomentAlpha doesn't need any additional information, it will compute + * its values from available moments in \f$ [-\pi/2 ; \pi/2] \f$. + * - Relative mode using non-empty constructor + * vpMomentAlpha(std::vector&, double): The vpMomentAlpha is computed in + * \f$ [-\pi ; \pi] \f$ from the available moments and the reference + * information. By knowing the reference, it may distinguish in-plane rotations + * of \f$ \alpha \f$ from rotations of \f$ \alpha + \pi \f$. 
+ * + * The following code demonstrates a calculation of a reference alpha and then + * uses this alpha to estimate the orientation of the same object after + * performing a 180 degrees rotation. Therefore the first and second alpha should + * have opposite values. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * //generic function for printing + * void print (double i) { std::cout << i << "\t";} + * + * int main() + * { + * vpPoint p; + * std::vector vec_p; // Vector that contains the vertices of the contour polygon + * p.set_x(1); p.set_y(1); // Coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // Coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * p.set_x(-3); p.set_y(0); // Coordinates in meters in the image plane (vertex 3) + * vec_p.push_back(p); + * p.set_x(-3); p.set_y(-1); // Coordinates in meters in the image plane (vertex 4) + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * vpMomentObject objRef(3); // Reference object. 
Must be of order 3 because we will + * // need the 3rd order centered moments + * + * objRef.setType(vpMomentObject::DENSE_POLYGON); // Object is the inner part of a polygon + * objRef.fromVector(vec_p); // Init the dense object with the polygon + * + * vpMomentDatabase dbRef; // Reference database + * vpMomentGravityCenter gRef; // Declaration of gravity center + * vpMomentCentered mcRef; // Centered moments + * vpMomentAlpha alphaRef; // Declare alpha as reference + * + * gRef.linkTo(dbRef); // Add gravity center to database + * mcRef.linkTo(dbRef); // Add centered moments + * alphaRef.linkTo(dbRef); // Add alpha depending on centered moments + * + * dbRef.updateAll(objRef); // All of the moments must be updated, not just alpha + * + * gRef.compute(); // Compute the moment + * mcRef.compute(); // Compute centered moments AFTER gravity center + * alphaRef.compute(); // Compute alpha AFTER centered moments. + * + * // The order of values in the vector must be as follows: mu30 mu21 mu12 mu03 + * std::vector mu3ref = {mcRef.get(3,0), mcRef.get(2,1), mcRef.get(1,2), mcRef.get(0,3)}; + * + * std::cout << "--- Reference object ---" << std::endl; + * std::cout << "alphaRef=" << vpMath::deg(alphaRef.get()) << " deg" << std::endl << "mu3="; // print reference alpha + * std::for_each (mu3ref.begin(), mu3ref.end(), print); + * std::cout << std::endl; + * + * ////////////CURRENT VALUES (same object rotated 180deg - must be + * ////////////entered in reverse order)//////////////// + * vec_p.clear(); + * + * p.set_x(-3); p.set_y(1); // Coordinates in meters in the image plane (vertex 4) + * vec_p.push_back(p); + * p.set_x(-3); p.set_y(0); // Coordinates in meters in the image plane (vertex 3) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(-2); // Coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * p.set_x(1); p.set_y(-1); // Coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * + * vpMomentObject obj(3); // Second object. 
Order 3 is also required because of the Alpha + * // will compare third-order centered moments to given reference. + * + * obj.setType(vpMomentObject::DENSE_POLYGON); // Object is the inner part of a polygon + * obj.fromVector(vec_p); // Init the dense object with the polygon + * + * vpMomentDatabase db; // Database + * vpMomentGravityCenter g; // Declaration of gravity center + * vpMomentCentered mc; // mc contains centered moments + * vpMomentAlpha alpha(mu3ref, alphaRef.get()); // Declare alpha as relative to a reference + * + * g.linkTo(db); // Add gravity center to database + * mc.linkTo(db); // Add centered moments + * alpha.linkTo(db); // Add alpha depending on centered moments + * + * db.updateAll(obj); // All of the moments must be updated + * + * g.compute(); // Compute the moment + * mc.compute(); // Compute centered moments AFTER gravity center + * alpha.compute(); // Compute alpha AFTER centered moments. + * + * std::cout << "--- current object ---" << std::endl; + * std::cout << "alpha=" << vpMath::deg(alpha.get()) << " deg" << std::endl; + * + * return 0; + * } + * \endcode + * This program outputs: + * \code + * --- Reference object --- + * alphaRef=25.3019 deg + * mu3=1.80552 0.921882 0.385828 0.122449 + * --- current object --- + * alpha=-25.3019 deg + * \endcode + * + * There is also testMomentAlpha.cpp example that shows how to compute alpha in the range \f$ [-\pi ; \pi] \f$ + * using arrow images as input. The code is given below: + * \include testMomentAlpha.cpp + * + * From the first image we compute the 3rd order centered moments and the value of the reference alpha + * that is than used to compute the alpha moment in the range \f$ [-\pi ; \pi] \f$. 
Running this example you will get: + * \code + * alpha expected 0 computed -0.128108 deg + * alpha expected 45 computed 44.8881 deg + * alpha expected 90 computed 89.8719 deg + * alpha expected 135 computed 134.888 deg + * alpha expected 180 computed 179.872 deg + * alpha expected -135 computed -135.112 deg + * alpha expected -90 computed -90.1281 deg + * alpha expected -45 computed -45.1119 deg + * \endcode + * + * Shortcuts for quickly getting those references exist in vpMomentCommon. + * + * This moment depends on vpMomentCentered. + */ class VISP_EXPORT vpMomentAlpha : public vpMoment { private: @@ -211,21 +211,20 @@ class VISP_EXPORT vpMomentAlpha : public vpMoment public: vpMomentAlpha(); vpMomentAlpha(const std::vector &mu3_ref, double alpha_ref, double threshold = 1e-6); - virtual ~vpMomentAlpha(){}; void compute(); /*! - Retrieve the orientation of the object as a single double value. + * Retrieve the orientation of the object as a single double value. */ double get() const { return values[0]; } /*! - Moment name. + * Moment name. */ const char *name() const { return "vpMomentAlpha"; } /*! - Returns true if the alpha moment was constructed as a reference with values in \f$ [-\pi/2 ; \pi/2] \f$, false - otherwise. + * Returns true if the alpha moment was constructed as a reference with values in \f$ [-\pi/2 ; \pi/2] \f$, false + * otherwise. */ inline bool is_ref() const { @@ -236,8 +235,8 @@ class VISP_EXPORT vpMomentAlpha : public vpMoment } /*! - Returns true if the alpha moment is computed on a symmetric object along its two axis. - Symmetry is computed using 3rd order centered moments \f$\mu_{30},\mu_{21},\mu_{12},\mu_{03}\f$. + * Returns true if the alpha moment is computed on a symmetric object along its two axis. + * Symmetry is computed using 3rd order centered moments \f$\mu_{30},\mu_{21},\mu_{12},\mu_{03}\f$. 
*/ inline bool is_symmetric() const { diff --git a/modules/core/include/visp3/core/vpMomentArea.h b/modules/core/include/visp3/core/vpMomentArea.h index d9467c12e4..9a05d6f8b7 100644 --- a/modules/core/include/visp3/core/vpMomentArea.h +++ b/modules/core/include/visp3/core/vpMomentArea.h @@ -39,26 +39,24 @@ class vpMomentObject; class vpMomentCentered; // Required for discrete case of vpMomentObject /*! - \class vpMomentArea - - \ingroup group_core_moments - - \brief Class handling the surface moment. - - For a dense planar object, the area corresponds to the zero-order moment: - \f[ a = m_{00} = \mu_{00} \f] - - When considering a discrete set of points, the moment \f$ m_{00} \f$ simply - corresponds to the number of points. Since this is of no use in a servoing - scheme, this class uses in this case \f$ a = \mu_{20} + \mu_{02} \f$, which is - invariant to planar translation and rotation. - -*/ + * \class vpMomentArea + * + * \ingroup group_core_moments + * + * \brief Class handling the surface moment. + * + * For a dense planar object, the area corresponds to the zero-order moment: + * \f[ a = m_{00} = \mu_{00} \f] + * + * When considering a discrete set of points, the moment \f$ m_{00} \f$ simply + * corresponds to the number of points. Since this is of no use in a servoing + * scheme, this class uses in this case \f$ a = \mu_{20} + \mu_{02} \f$, which is + * invariant to planar translation and rotation. + */ class VISP_EXPORT vpMomentArea : public vpMoment { public: vpMomentArea(); - virtual ~vpMomentArea(){}; /** @name Inherited functionalities from vpMomentArea */ //@{ diff --git a/modules/core/include/visp3/core/vpMomentAreaNormalized.h b/modules/core/include/visp3/core/vpMomentAreaNormalized.h index ab512fd9d7..342bbf6c83 100644 --- a/modules/core/include/visp3/core/vpMomentAreaNormalized.h +++ b/modules/core/include/visp3/core/vpMomentAreaNormalized.h @@ -31,9 +31,9 @@ * 2D normalized surface moment descriptor (usually described as An) */ /*! 
- \file vpMomentAreaNormalized.h - \brief 2D normalized surface moment descriptor (usually described as An). -*/ + * \file vpMomentAreaNormalized.h + * \brief 2D normalized surface moment descriptor (usually described as An). + */ #ifndef _vpMomentAreaNormalized_h_ #define _vpMomentAreaNormalized_h_ @@ -43,93 +43,92 @@ class vpMomentObject; class vpMomentCentered; /*! - \class vpMomentAreaNormalized - - \ingroup group_core_moments - - \brief Class handling the normalized surface moment that is invariant in -scale and used to estimate depth. - - This moment depends on vpMomentCentered. - - The idea behind vpMomentAreaNormalized is described in \cite Tahri05z. - - During a visual servoing process, a vpMomentAreaNormalized will converge -towards the desired depth when the current surface will converge to the -destination surface. It is defined as follows: \f$ a_n=Z^* -\sqrt{\frac{a^*}{a}} \f$ where \e a is the current surface and \e a* the -destination surface. Consequently, the vpMomentAreaNormalized needs to have -information about the desired depth \e Z* and the desired surface \e a*. - - \warning About the order of the object. - The surface (refered to as \e a in the above paragraph) depends of the -nature of the object. - - In case of a continuous object (when vpMomentObject::getType() is -vpMomentObject::DENSE_FULL_OBJECT or vpMomentObject::DENSE_POLYGON) -\f$a=m_{00}\f$. - - In case of a discrete object (when vpMomentObject::getType() is -vpMomentObject::DISCRETE) \f$a=\mu_{20}+\mu_{02}\f$. - - Therefore, a vpMomentObject has to be of minimum order 2 in order to compute -a vpMomentAreaNormalized moment in the discrete case and of minimum order 0 in -continous cases. - - This example shows a computation in the discrete case. 
- \code -#include -#include -#include -#include -#include -#include - -int main() -{ - vpPoint p; - std::vector vec_p; // vector that contains object points - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane of point 1 - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane of point 2 - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - vpMomentObject obj(2); // Object where all the moment defined with - // i+j <= 2 will be computed below. Order is - // 2 because in discrete mode, the surface - // computation is a=mu02+mu02 - - - obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object - obj.fromVector(vec_p); // initialize the object with the points coordinates - - vpMomentDatabase db; //reference database - vpMomentGravityCenter g; // declaration of gravity center - vpMomentCentered mc; // centered moments - vpMomentAreaNormalized an(2,1); //declare normalized surface with - //destination depth of 1 meter and - //destination surface of 2 m2 - - g.linkTo(db); //add gravity center to database - mc.linkTo(db); //add centered moments - an.linkTo(db); //add alpha depending on centered moments - - db.updateAll(obj); // All of the moments must be updated, not just an - - g.compute(); // compute the moment - mc.compute(); //compute centered moments AFTER gravity center - an.compute(); //compute alpha AFTER centered moments. - - std::cout << an << std::endl; - - return 0; -} - \endcode - This code produces the following output: - \code -An:1.41421 - \endcode -*/ + * \class vpMomentAreaNormalized + * + * \ingroup group_core_moments + * + * \brief Class handling the normalized surface moment that is invariant in + * scale and used to estimate depth. + * + * This moment depends on vpMomentCentered. + * + * The idea behind vpMomentAreaNormalized is described in \cite Tahri05z. 
+ * + * During a visual servoing process, a vpMomentAreaNormalized will converge + * towards the desired depth when the current surface will converge to the + * destination surface. It is defined as follows: \f$ a_n=Z^* + * \sqrt{\frac{a^*}{a}} \f$ where \e a is the current surface and \e a* the + * destination surface. Consequently, the vpMomentAreaNormalized needs to have + * information about the desired depth \e Z* and the desired surface \e a*. + * + * \warning About the order of the object. + * The surface (referred to as \e a in the above paragraph) depends of the + * nature of the object. + * - In case of a continuous object (when vpMomentObject::getType() is + * vpMomentObject::DENSE_FULL_OBJECT or vpMomentObject::DENSE_POLYGON) + * \f$a=m_{00}\f$. + * - In case of a discrete object (when vpMomentObject::getType() is + * vpMomentObject::DISCRETE) \f$a=\mu_{20}+\mu_{02}\f$. + * + * Therefore, a vpMomentObject has to be of minimum order 2 in order to compute + * a vpMomentAreaNormalized moment in the discrete case and of minimum order 0 in + * continuous cases. + * + * This example shows a computation in the discrete case. + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpPoint p; + * std::vector vec_p; // vector that contains object points + * + * p.set_x(1); p.set_y(1); // coordinates in meters in the image plane of point 1 + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // coordinates in meters in the image plane of point 2 + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * vpMomentObject obj(2); // Object where all the moment defined with + * // i+j <= 2 will be computed below. 
Order is + * // 2 because in discrete mode, the surface + * // computation is a=mu02+mu02 + * + * obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object + * obj.fromVector(vec_p); // initialize the object with the points coordinates + * + * vpMomentDatabase db; //reference database + * vpMomentGravityCenter g; // declaration of gravity center + * vpMomentCentered mc; // centered moments + * vpMomentAreaNormalized an(2,1); //declare normalized surface with + * //destination depth of 1 meter and + * //destination surface of 2 m2 + * + * g.linkTo(db); //add gravity center to database + * mc.linkTo(db); //add centered moments + * an.linkTo(db); //add alpha depending on centered moments + * + * db.updateAll(obj); // All of the moments must be updated, not just an + * + * g.compute(); // compute the moment + * mc.compute(); //compute centered moments AFTER gravity center + * an.compute(); //compute alpha AFTER centered moments. + * + * std::cout << an << std::endl; + * + * return 0; + * } + * \endcode + * This code produces the following output: + * \code + * An:1.41421 + * \endcode + */ class VISP_EXPORT vpMomentAreaNormalized : public vpMoment { private: @@ -138,45 +137,46 @@ class VISP_EXPORT vpMomentAreaNormalized : public vpMoment public: vpMomentAreaNormalized(double a_star, double Z_star); - virtual ~vpMomentAreaNormalized() { }; void compute(); + /*! - Retrieves the desired surface \e a* as specified in the constructor. - */ + * Retrieves the desired surface \e a* as specified in the constructor. + */ double getDesiredArea() const { return desiredSurface; } + /*! - Retrieves the desired depth \e Z* as specified in the constructor. - */ + * Retrieves the desired depth \e Z* as specified in the constructor. + */ double getDesiredDepth() const { return desiredDepth; } /*! - Set the desired depth \e Z* to a new value than the one specified in the constructor. - This value has to be set before calling compute(). 
- */ + * Set the desired depth \e Z* to a new value than the one specified in the constructor. + * This value has to be set before calling compute(). + */ void setDesiredDepth(double Z_star) { desiredDepth = Z_star; } + /*! - Set the desired area \e a* to a new value than the one specified in the constructor. - This value has to be set before calling compute(). - */ + * Set the desired area \e a* to a new value than the one specified in the constructor. + * This value has to be set before calling compute(). + */ void setDesiredArea(double a_star) { desiredSurface = a_star; } - #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) /*! - @name Deprecated functions - */ + * @name Deprecated functions + */ //@{ /*! - \deprecated Use rather getDesiredArea() - Retrieves the desired surface \e a* as specified in the constructor. - */ + * \deprecated Use rather getDesiredArea() + * Retrieves the desired surface \e a* as specified in the constructor. + */ vp_deprecated double getDesiredSurface() const { return desiredSurface; } //@} #endif /*! - Moment name. - */ + * Moment name. 
+ */ const char *name() const { return "vpMomentAreaNormalized"; } friend VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpMomentAreaNormalized &v); void printDependencies(std::ostream &os) const; diff --git a/modules/core/include/visp3/core/vpMomentBasic.h b/modules/core/include/visp3/core/vpMomentBasic.h index 5d8acf8dc1..7597478402 100644 --- a/modules/core/include/visp3/core/vpMomentBasic.h +++ b/modules/core/include/visp3/core/vpMomentBasic.h @@ -70,7 +70,6 @@ class VISP_EXPORT vpMomentBasic : public vpMoment { public: vpMomentBasic(); - virtual ~vpMomentBasic(){}; void compute(); const std::vector &get() const; diff --git a/modules/core/include/visp3/core/vpMomentCInvariant.h b/modules/core/include/visp3/core/vpMomentCInvariant.h index 6323fa859f..106d576f61 100644 --- a/modules/core/include/visp3/core/vpMomentCInvariant.h +++ b/modules/core/include/visp3/core/vpMomentCInvariant.h @@ -51,25 +51,25 @@ class vpMomentBasic; \ingroup group_core_moments This class defines several 2D (translation+rotation+scale) invariants for -both symmetric and non-symmetric objects. These moment-based invariants are -described in the following papers \cite Chaumette04a, \cite Tahri05z. + both symmetric and non-symmetric objects. These moment-based invariants are + described in the following papers \cite Chaumette04a, \cite Tahri05z. The descriptions for the invariants \f$C_1\f$ to \f$C_{10}\f$ can be found -in \cite Chaumette04a and for invariants -\f$P_x\f$,\f$P_y\f$,\f$S_x\f$,\f$S_y\f$ in \cite Tahri05z. + in \cite Chaumette04a and for invariants + \f$P_x\f$,\f$P_y\f$,\f$S_x\f$,\f$S_y\f$ in \cite Tahri05z. These invariants are classicaly used in visual servoing to control the -out-of-plane rotations. The C-type or P-type invariants are used for -non-symmetric objects whereas the S-type invariants are used for symmetric -objects. + out-of-plane rotations. 
The C-type or P-type invariants are used for + non-symmetric objects whereas the S-type invariants are used for symmetric + objects. For most cases of non-symmetric objects, (\f$C_4\f$,\f$C_6\f$) or -(\f$P_x\f$,\f$P_y\f$) couples are widely used to control x and y rotations. + (\f$P_x\f$,\f$P_y\f$) couples are widely used to control x and y rotations. For symmetric objects \f$S_x\f$ and \f$S_y\f$ are the only choice. There are 14 translation+rotation+scale invariants (10 C-type, 2 P-type and -2 S-type) that can be accessed from by vpMomentCInvariant::get or any of the -get shortcuts. + 2 S-type) that can be accessed from by vpMomentCInvariant::get or any of the + get shortcuts. The example below shows how to retrieve the \f$C_2\f$ invariant: \code @@ -140,7 +140,6 @@ class VISP_EXPORT vpMomentCInvariant : public vpMoment public: explicit vpMomentCInvariant(bool flg_sxsynormalization = false); - virtual ~vpMomentCInvariant() { }; /*! Shorcut for getting the value of \f$C_1\f$. diff --git a/modules/core/include/visp3/core/vpMomentCentered.h b/modules/core/include/visp3/core/vpMomentCentered.h index 2e0a8043a9..90b0a00f78 100644 --- a/modules/core/include/visp3/core/vpMomentCentered.h +++ b/modules/core/include/visp3/core/vpMomentCentered.h @@ -37,7 +37,7 @@ #include /*! \file vpMomentCentered.h - \brief Centered moment descriptor (also refered to as \f$\mu_{ij}\f$). + \brief Centered moment descriptor (also referred as \f$\mu_{ij}\f$). 
*/ @@ -75,7 +75,6 @@ class VISP_EXPORT vpMomentCentered : public vpMoment { public: vpMomentCentered(); - virtual ~vpMomentCentered(){}; void compute(); double get(unsigned int i, unsigned int j) const; diff --git a/modules/core/include/visp3/core/vpMomentCommon.h b/modules/core/include/visp3/core/vpMomentCommon.h index 5fbae91c2d..33e6a19979 100644 --- a/modules/core/include/visp3/core/vpMomentCommon.h +++ b/modules/core/include/visp3/core/vpMomentCommon.h @@ -68,27 +68,27 @@ class vpMomentObject; - vpMomentAlpha - vpMomentArea - There is no need to do the linkTo operations manually nor is it necessary + There is no need to do the linkTo operations manually nor is it necessary to care about the order of moment computation. - This class carries an vpMomentCommon::updateAll() method capable of + This class carries an vpMomentCommon::updateAll() method capable of updating AND computing moments from an object (see 4-step process in vpMoment). The moments computed by this class are classical moments used in moment-based visual servoing. For more information see \cite Tahri05z. - To initialize this moment set the user needs to compute the following + To initialize this moment set the user needs to compute the following things: - the Mu3 value set: set of third-order centered moments computed for a - reference object. (\f$\mu_{ij}$ with $i+j = 3\f$ ). These values allow the - system to save the reference angular position and to perform planar - rotations of more than 180 degrees if needed. + reference object. (\f$\mu_{ij}$ with $i+j = 3\f$ ). These values allow the + system to save the reference angular position and to perform planar + rotations of more than 180 degrees if needed. - the destination depth. - the surface of the destination object in the end of the visual servoing - process. + process. - the reference alpha: angular position of the object used to obtain the - Mu3 set. + Mu3 set. 
- Shortcuts for each of these prerequisites are provided by this class + Shortcuts for each of these prerequisites are provided by this class except depth (methods vpMomentCommon::getMu3(), vpMomentCommon::getSurface(), vpMomentCommon::getAlpha()). @@ -128,12 +128,12 @@ class VISP_EXPORT vpMomentCommon : public vpMomentDatabase public: vpMomentCommon(double dstSurface, const std::vector &ref, double refAlpha, double dstZ = 1.0, bool flg_sxsyfromnormalized = false); - virtual ~vpMomentCommon(); + virtual ~vpMomentCommon() override; static double getAlpha(vpMomentObject &object); static std::vector getMu3(vpMomentObject &object); static double getSurface(vpMomentObject &object); - void updateAll(vpMomentObject &object); + void updateAll(vpMomentObject &object) override; }; #endif // VPCOMMONMOMENTS_H diff --git a/modules/core/include/visp3/core/vpMomentDatabase.h b/modules/core/include/visp3/core/vpMomentDatabase.h index f9b4c80902..bb66dc2525 100644 --- a/modules/core/include/visp3/core/vpMomentDatabase.h +++ b/modules/core/include/visp3/core/vpMomentDatabase.h @@ -52,19 +52,19 @@ class vpMomentObject; \ingroup group_core_moments \brief This class allows to register all vpMoments so they can access each -other according to their dependencies. + other according to their dependencies. Sometimes, a moment needs to have access to other moment's values to be -computed. For example vpMomentCentered needs additionnal information about the -gravity center vpMomentGravityCenter in order to compute the moment's value -from a vpMomentObject. This gravity center should be stored in a -vpMomentDatabase where it can be accessed. + computed. For example vpMomentCentered needs additional information about the + gravity center vpMomentGravityCenter in order to compute the moment's value + from a vpMomentObject. This gravity center should be stored in a + vpMomentDatabase where it can be accessed. All moments in a database can access each other freely at any time. 
They can -also verify if a moment is present in the database or not. Here is a example -of a dependency between two moments using a vpMomentDatabase: + also verify if a moment is present in the database or not. Here is a example + of a dependency between two moments using a vpMomentDatabase: -\code + \code #include #include #include @@ -127,7 +127,8 @@ class VISP_EXPORT vpMomentDatabase { private: #ifndef DOXYGEN_SHOULD_SKIP_THIS - struct vpCmpStr_t { + struct vpCmpStr_t + { bool operator()(char const *a, char const *b) const { return std::strcmp(a, b) < 0; } }; #endif @@ -135,8 +136,8 @@ class VISP_EXPORT vpMomentDatabase void add(vpMoment &moment, const char *name); public: - vpMomentDatabase() : moments() {} - virtual ~vpMomentDatabase() {} + vpMomentDatabase() : moments() { } + virtual ~vpMomentDatabase() { } /** @name Inherited functionalities from vpMomentDatabase */ //@{ diff --git a/modules/core/include/visp3/core/vpMomentGravityCenter.h b/modules/core/include/visp3/core/vpMomentGravityCenter.h index 66e212ca76..39171649ff 100644 --- a/modules/core/include/visp3/core/vpMomentGravityCenter.h +++ b/modules/core/include/visp3/core/vpMomentGravityCenter.h @@ -55,7 +55,7 @@ class vpMomentObject; These coordinates are defined as follows: \f$x_g = \frac{m_{01}}{m_{00}} \f$,\f$y_g = \frac{m_{10}}{m_{00}} \f$ -\code + \code #include #include #include @@ -95,21 +95,20 @@ int main() return 0; } -\endcode + \endcode -This example produces the following results: -\code + This example produces the following results: + \code Xg=0.0166667 Yg=-0.00833333 Xg=0.0166667, Yg=-0.00833333 -\endcode + \endcode */ class VISP_EXPORT vpMomentGravityCenter : public vpMoment { public: vpMomentGravityCenter(); - virtual ~vpMomentGravityCenter(){}; /** @name Inherited functionalities from vpMomentGravityCenter */ //@{ diff --git a/modules/core/include/visp3/core/vpMomentGravityCenterNormalized.h b/modules/core/include/visp3/core/vpMomentGravityCenterNormalized.h index 
a86e80af28..aeb9d02b5b 100644 --- a/modules/core/include/visp3/core/vpMomentGravityCenterNormalized.h +++ b/modules/core/include/visp3/core/vpMomentGravityCenterNormalized.h @@ -61,7 +61,6 @@ class VISP_EXPORT vpMomentGravityCenterNormalized : public vpMomentGravityCenter { public: vpMomentGravityCenterNormalized(); - virtual ~vpMomentGravityCenterNormalized(){}; void compute(); //! Moment name. const char *name() const { return "vpMomentGravityCenterNormalized"; } diff --git a/modules/core/include/visp3/core/vpMomentObject.h b/modules/core/include/visp3/core/vpMomentObject.h index 1e39cec69d..fb8e1af6ff 100644 --- a/modules/core/include/visp3/core/vpMomentObject.h +++ b/modules/core/include/visp3/core/vpMomentObject.h @@ -98,7 +98,7 @@ class vpCameraParameters; to get the basic moments that are computed and how to compute other classical moments such as the gravity center or the centered moments. -\code + \code #include #include #include diff --git a/modules/core/include/visp3/core/vpPixelMeterConversion.h b/modules/core/include/visp3/core/vpPixelMeterConversion.h index 0e546b1149..962b3f8848 100644 --- a/modules/core/include/visp3/core/vpPixelMeterConversion.h +++ b/modules/core/include/visp3/core/vpPixelMeterConversion.h @@ -101,7 +101,7 @@ class VISP_EXPORT vpPixelMeterConversion */ inline static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y) { - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: convertPointWithoutDistortion(cam, u, v, x, y); break; @@ -143,7 +143,7 @@ class VISP_EXPORT vpPixelMeterConversion */ inline static void convertPoint(const vpCameraParameters &cam, const vpImagePoint &iP, double &x, double &y) { - switch (cam.projModel) { + switch (cam.m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: convertPointWithoutDistortion(cam, iP, x, y); break; @@ -172,8 +172,8 @@ class VISP_EXPORT 
vpPixelMeterConversion inline static void convertPointWithoutDistortion(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y) { - x = (u - cam.u0) * cam.inv_px; - y = (v - cam.v0) * cam.inv_py; + x = (u - cam.m_u0) * cam.m_inv_px; + y = (v - cam.m_v0) * cam.m_inv_py; } /*! @@ -194,8 +194,8 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithoutDistortion(const vpCameraParameters &cam, const vpImagePoint &iP, double &x, double &y) { - x = (iP.get_u() - cam.u0) * cam.inv_px; - y = (iP.get_v() - cam.v0) * cam.inv_py; + x = (iP.get_u() - cam.m_u0) * cam.m_inv_px; + y = (iP.get_v() - cam.m_v0) * cam.m_inv_py; } /*! @@ -215,9 +215,9 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithDistortion(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y) { - double r2 = 1. + cam.kdu * (vpMath::sqr((u - cam.u0) * cam.inv_px) + vpMath::sqr((v - cam.v0) * cam.inv_py)); - x = (u - cam.u0) * r2 * cam.inv_px; - y = (v - cam.v0) * r2 * cam.inv_py; + double r2 = 1. + cam.m_kdu * (vpMath::sqr((u - cam.m_u0) * cam.m_inv_px) + vpMath::sqr((v - cam.m_v0) * cam.m_inv_py)); + x = (u - cam.m_u0) * r2 * cam.m_inv_px; + y = (v - cam.m_v0) * r2 * cam.m_inv_py; } /*! @@ -239,10 +239,10 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithDistortion(const vpCameraParameters &cam, const vpImagePoint &iP, double &x, double &y) { - double r2 = 1. + cam.kdu * (vpMath::sqr((iP.get_u() - cam.u0) * cam.inv_px) + - vpMath::sqr((iP.get_v() - cam.v0) * cam.inv_py)); - x = (iP.get_u() - cam.u0) * r2 * cam.inv_px; - y = (iP.get_v() - cam.v0) * r2 * cam.inv_py; + double r2 = 1. + cam.m_kdu * (vpMath::sqr((iP.get_u() - cam.m_u0) * cam.m_inv_px) + + vpMath::sqr((iP.get_v() - cam.m_v0) * cam.m_inv_py)); + x = (iP.get_u() - cam.m_u0) * r2 * cam.m_inv_px; + y = (iP.get_v() - cam.m_v0) * r2 * cam.m_inv_py; } /*! 
@@ -268,7 +268,7 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithKannalaBrandtDistortion(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y) { - double x_d = (u - cam.u0) / cam.px, y_d = (v - cam.v0) / cam.py; + double x_d = (u - cam.m_u0) / cam.m_px, y_d = (v - cam.m_v0) / cam.m_py; double scale = 1.0; double r_d = sqrt(vpMath::sqr(x_d) + vpMath::sqr(y_d)); @@ -285,10 +285,10 @@ class VISP_EXPORT vpPixelMeterConversion for (int j = 0; j < 10; j++) { double theta2 = theta * theta, theta4 = theta2 * theta2, theta6 = theta4 * theta2, theta8 = theta6 * theta2; double k0_theta2 = k[0] * theta2, k1_theta4 = k[1] * theta4, k2_theta6 = k[2] * theta6, - k3_theta8 = k[3] * theta8; - /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ + k3_theta8 = k[3] * theta8; + /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ double theta_fix = (theta * (1 + k0_theta2 + k1_theta4 + k2_theta6 + k3_theta8) - r_d) / - (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8); + (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8); theta = theta - theta_fix; if (fabs(theta_fix) < EPS) break; @@ -323,7 +323,7 @@ class VISP_EXPORT vpPixelMeterConversion inline static void convertPointWithKannalaBrandtDistortion(const vpCameraParameters &cam, const vpImagePoint &iP, double &x, double &y) { - double x_d = (iP.get_u() - cam.u0) / cam.px, y_d = (iP.get_v() - cam.v0) / cam.py; + double x_d = (iP.get_u() - cam.m_u0) / cam.m_px, y_d = (iP.get_v() - cam.m_v0) / cam.m_py; double scale = 1.0; double r_d = sqrt(vpMath::sqr(x_d) + vpMath::sqr(y_d)); @@ -340,10 +340,10 @@ class VISP_EXPORT vpPixelMeterConversion for (int j = 0; j < 10; j++) { double theta2 = theta * theta, theta4 = theta2 * theta2, theta6 = theta4 * theta2, theta8 = theta6 * theta2; double k0_theta2 = k[0] * theta2, k1_theta4 = k[1] * theta4, k2_theta6 = k[2] * theta6, - k3_theta8 = k[3] * theta8; - /* 
new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ + k3_theta8 = k[3] * theta8; + /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ double theta_fix = (theta * (1 + k0_theta2 + k1_theta4 + k2_theta6 + k3_theta8) - r_d) / - (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8); + (1 + 3 * k0_theta2 + 5 * k1_theta4 + 7 * k2_theta6 + 9 * k3_theta8); theta = theta - theta_fix; if (fabs(theta_fix) < EPS) break; diff --git a/modules/core/include/visp3/core/vpPoint.h b/modules/core/include/visp3/core/vpPoint.h index 989859132d..9710e1ecc1 100644 --- a/modules/core/include/visp3/core/vpPoint.h +++ b/modules/core/include/visp3/core/vpPoint.h @@ -82,23 +82,21 @@ class VISP_EXPORT vpPoint : public vpForwardProjection vpPoint(double oX, double oY, double oZ); explicit vpPoint(const vpColVector &oP); explicit vpPoint(const std::vector &oP); - //! Destructor. - virtual ~vpPoint() {} public: // Compute the 3D coordinates _cP (camera frame) - void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpPoint *duplicate() 
const; + vpPoint *duplicate() const override; // Get coordinates double get_X() const; @@ -119,17 +117,12 @@ class VISP_EXPORT vpPoint : public vpForwardProjection void getWorldCoordinates(std::vector &oP); friend VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpPoint &vpp); -#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) - vpPoint &operator=(const vpPoint &vpp) = default; -#else - vpPoint &operator=(const vpPoint &vpp); -#endif //! Projection onto the image plane of a point. Input: the 3D coordinates in //! the camera frame _cP, output : the 2D coordinates _p. - void projection(const vpColVector &_cP, vpColVector &_p) const; + void projection(const vpColVector &_cP, vpColVector &_p) const override; - void projection(); + void projection() override; // Set coordinates void set_X(double cX); @@ -145,12 +138,12 @@ class VISP_EXPORT vpPoint : public vpForwardProjection void set_w(double w); void setWorldCoordinates(double oX, double oY, double oZ); - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; void setWorldCoordinates(const std::vector &oP); protected: //! Basic construction. - void init(); + void init() override; }; #endif diff --git a/modules/core/include/visp3/core/vpPoseVector.h b/modules/core/include/visp3/core/vpPoseVector.h index e98544a936..fb59bf63f2 100644 --- a/modules/core/include/visp3/core/vpPoseVector.h +++ b/modules/core/include/visp3/core/vpPoseVector.h @@ -201,10 +201,6 @@ class VISP_EXPORT vpPoseVector : public vpArray2D vpPoseVector(const vpTranslationVector &tv, const vpThetaUVector &tu); // constructor convert a translation and a rotation matrix into a pose vpPoseVector(const vpTranslationVector &tv, const vpRotationMatrix &R); - /*! - Destructor. 
- */ - virtual ~vpPoseVector() { }; vpPoseVector buildFrom(double tx, double ty, double tz, double tux, double tuy, double tuz); // convert an homogeneous matrix in a pose diff --git a/modules/core/include/visp3/core/vpQuaternionVector.h b/modules/core/include/visp3/core/vpQuaternionVector.h index 7b4d177535..93402d38b4 100644 --- a/modules/core/include/visp3/core/vpQuaternionVector.h +++ b/modules/core/include/visp3/core/vpQuaternionVector.h @@ -117,9 +117,6 @@ class VISP_EXPORT vpQuaternionVector : public vpRotationVector explicit vpQuaternionVector(const vpColVector &q); explicit vpQuaternionVector(const std::vector &q); - //! Destructor. - virtual ~vpQuaternionVector(){} - vpQuaternionVector buildFrom(const double qx, const double qy, const double qz, const double qw); vpQuaternionVector buildFrom(const vpRotationMatrix &R); vpQuaternionVector buildFrom(const vpThetaUVector &tu); @@ -127,15 +124,15 @@ class VISP_EXPORT vpQuaternionVector : public vpRotationVector vpQuaternionVector buildFrom(const std::vector &q); void set(double x, double y, double z, double w); - const double& x() const; - const double& y() const; - const double& z() const; - const double& w() const; + const double &x() const; + const double &y() const; + const double &z() const; + const double &w() const; - double& x(); - double& y(); - double& z(); - double& w(); + double &x(); + double &y(); + double &z(); + double &w(); vpQuaternionVector operator+(const vpQuaternionVector &q) const; vpQuaternionVector operator-(const vpQuaternionVector &q) const; @@ -154,11 +151,11 @@ class VISP_EXPORT vpQuaternionVector : public vpRotationVector double magnitude() const; void normalize(); - static double dot(const vpQuaternionVector& q0, const vpQuaternionVector& q1); + static double dot(const vpQuaternionVector &q0, const vpQuaternionVector &q1); - static vpQuaternionVector lerp(const vpQuaternionVector& q0, const vpQuaternionVector& q1, double t); - static vpQuaternionVector nlerp(const 
vpQuaternionVector& q0, const vpQuaternionVector& q1, double t); - static vpQuaternionVector slerp(const vpQuaternionVector& q0, const vpQuaternionVector& q1, double t); + static vpQuaternionVector lerp(const vpQuaternionVector &q0, const vpQuaternionVector &q1, double t); + static vpQuaternionVector nlerp(const vpQuaternionVector &q0, const vpQuaternionVector &q1, double t); + static vpQuaternionVector slerp(const vpQuaternionVector &q0, const vpQuaternionVector &q1, double t); }; #endif diff --git a/modules/core/include/visp3/core/vpRequest.h b/modules/core/include/visp3/core/vpRequest.h index c2370de1d6..41aa8c521a 100644 --- a/modules/core/include/visp3/core/vpRequest.h +++ b/modules/core/include/visp3/core/vpRequest.h @@ -49,7 +49,7 @@ \brief This the request that will transit on the network - Exemple request decoding an image on a specific form. + Example request decoding an image on a specific form. First parameter : Height of the image. Second parameter : Width of the image. Thirs parameter : Bitmap of the image (not compress). diff --git a/modules/core/include/visp3/core/vpRotationMatrix.h b/modules/core/include/visp3/core/vpRotationMatrix.h index 0862fa8cbb..09c66521ce 100644 --- a/modules/core/include/visp3/core/vpRotationMatrix.h +++ b/modules/core/include/visp3/core/vpRotationMatrix.h @@ -133,11 +133,6 @@ class VISP_EXPORT vpRotationMatrix : public vpArray2D explicit vpRotationMatrix(const std::initializer_list &list); #endif - /*! - Destructor. 
- */ - virtual ~vpRotationMatrix() { } - vpRotationMatrix buildFrom(const vpHomogeneousMatrix &M); vpRotationMatrix buildFrom(const vpThetaUVector &v); vpRotationMatrix buildFrom(const vpPoseVector &p); diff --git a/modules/core/include/visp3/core/vpRotationVector.h b/modules/core/include/visp3/core/vpRotationVector.h index 3ded305a11..a2a34b7506 100644 --- a/modules/core/include/visp3/core/vpRotationVector.h +++ b/modules/core/include/visp3/core/vpRotationVector.h @@ -96,21 +96,16 @@ class VISP_EXPORT vpRotationVector : public vpArray2D { public: //! Constructor that constructs a 0-size rotation vector. - vpRotationVector() : vpArray2D(), m_index(0) {} + vpRotationVector() : vpArray2D(), m_index(0) { } //! Constructor that constructs a vector of size n and initialize all values //! to zero. - explicit vpRotationVector(unsigned int n) : vpArray2D(n, 1), m_index(0) {} + explicit vpRotationVector(unsigned int n) : vpArray2D(n, 1), m_index(0) { } /*! Copy operator. */ - vpRotationVector(const vpRotationVector &v) : vpArray2D(v), m_index(0) {} - - /*! - Destructor. - */ - virtual ~vpRotationVector(){} + vpRotationVector(const vpRotationVector &v) : vpArray2D(v), m_index(0) { } /** @name Inherited functionalities from vpRotationVector */ //@{ diff --git a/modules/core/include/visp3/core/vpRowVector.h b/modules/core/include/visp3/core/vpRowVector.h index 2e17c4ea18..be20e8f889 100644 --- a/modules/core/include/visp3/core/vpRowVector.h +++ b/modules/core/include/visp3/core/vpRowVector.h @@ -111,15 +111,15 @@ class VISP_EXPORT vpRowVector : public vpArray2D { public: //! Basic constructor that creates an empty 0-size row vector. - vpRowVector() : vpArray2D() {} + vpRowVector() : vpArray2D() { } //! Construct a row vector of size n. All the elements are initialized to //! zero. - explicit vpRowVector(unsigned int n) : vpArray2D(1, n) {} + explicit vpRowVector(unsigned int n) : vpArray2D(1, n) { } //! Construct a row vector of size n. Each element is set to \e val. 
- vpRowVector(unsigned int n, double val) : vpArray2D(1, n, val) {} + vpRowVector(unsigned int n, double val) : vpArray2D(1, n, val) { } //! Copy constructor that allows to construct a row vector from an other //! one. - vpRowVector(const vpRowVector &v) : vpArray2D(v) {} + vpRowVector(const vpRowVector &v) : vpArray2D(v) { } vpRowVector(const vpRowVector &v, unsigned int c, unsigned int ncols); vpRowVector(const vpMatrix &M); vpRowVector(const vpMatrix &M, unsigned int i); @@ -127,12 +127,8 @@ class VISP_EXPORT vpRowVector : public vpArray2D vpRowVector(const std::vector &v); #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11) vpRowVector(vpRowVector &&v); - vpRowVector(const std::initializer_list &list) : vpArray2D(list) {} + vpRowVector(const std::initializer_list &list) : vpArray2D(list) { } #endif - /*! - Destructor. - */ - virtual ~vpRowVector() {} /*! Removes all elements from the vector (which are destroyed), @@ -170,7 +166,8 @@ class VISP_EXPORT vpRowVector : public vpArray2D /*! Extract a sub-row vector from a row vector. \param c : Index of the column corresponding to the first element of the - vector to extract. \param rowsize : Size of the vector to extract. + vector to extract. + \param rowsize : Size of the vector to extract. \exception vpException::fatalError If the vector to extract is not contained in the original one. @@ -310,7 +307,7 @@ class VISP_EXPORT vpRowVector : public vpArray2D \deprecated Provided only for compat with previous releases. This function does nothing. */ - vp_deprecated void init() {} + vp_deprecated void init() { } /*! 
\deprecated You should rather use stack(const vpRowVector &) */ diff --git a/modules/core/include/visp3/core/vpRxyzVector.h b/modules/core/include/visp3/core/vpRxyzVector.h index da541877ff..aa5d3c086e 100644 --- a/modules/core/include/visp3/core/vpRxyzVector.h +++ b/modules/core/include/visp3/core/vpRxyzVector.h @@ -189,9 +189,6 @@ class VISP_EXPORT vpRxyzVector : public vpRotationVector explicit vpRxyzVector(const vpColVector &rxyz); explicit vpRxyzVector(const std::vector &rxyz); - //! Destructor. - virtual ~vpRxyzVector(){} - // convert a rotation matrix into Rxyz vector vpRxyzVector buildFrom(const vpRotationMatrix &R); diff --git a/modules/core/include/visp3/core/vpRzyxVector.h b/modules/core/include/visp3/core/vpRzyxVector.h index 0782f10c3e..da55f25b90 100644 --- a/modules/core/include/visp3/core/vpRzyxVector.h +++ b/modules/core/include/visp3/core/vpRzyxVector.h @@ -175,7 +175,6 @@ int main() \endcode */ - class VISP_EXPORT vpRzyxVector : public vpRotationVector { public: @@ -191,9 +190,6 @@ class VISP_EXPORT vpRzyxVector : public vpRotationVector explicit vpRzyxVector(const vpColVector &rzyx); explicit vpRzyxVector(const std::vector &rzyx); - //! Destructor. - virtual ~vpRzyxVector(){} - // convert a rotation matrix into Rzyx vector vpRzyxVector buildFrom(const vpRotationMatrix &R); diff --git a/modules/core/include/visp3/core/vpRzyzVector.h b/modules/core/include/visp3/core/vpRzyzVector.h index 897522f861..bfc5c3ae09 100644 --- a/modules/core/include/visp3/core/vpRzyzVector.h +++ b/modules/core/include/visp3/core/vpRzyzVector.h @@ -189,9 +189,6 @@ class VISP_EXPORT vpRzyzVector : public vpRotationVector explicit vpRzyzVector(const vpColVector &rzyz); explicit vpRzyzVector(const std::vector &rzyz); - //! Destructor. 
- virtual ~vpRzyzVector(){} - // convert a rotation matrix into Rzyz vector vpRzyzVector buildFrom(const vpRotationMatrix &R); diff --git a/modules/core/include/visp3/core/vpServer.h b/modules/core/include/visp3/core/vpServer.h index d5eac7597c..83054b6696 100644 --- a/modules/core/include/visp3/core/vpServer.h +++ b/modules/core/include/visp3/core/vpServer.h @@ -52,8 +52,8 @@ TCP provides reliable, ordered delivery of a stream of bytes from a program on one computer to another program on another computer. - Exemple of server's code, receiving and sending basic message. - It corresponds to the client used in the first exemple of vpClient class' + Example of server's code, receiving and sending basic message. + It corresponds to the client used in the first example of vpClient class' documentation. \code @@ -93,8 +93,8 @@ int main(int argc,const char** argv) } \endcode - Exemple of server's code, receiving a vpImage on request form. - It correspond to the client used in the second exemple of vpClient class' + Example of server's code, receiving a vpImage on request form. + It correspond to the client used in the second example of vpClient class' documentation. \code @@ -174,7 +174,7 @@ class VISP_EXPORT vpServer : public vpNetwork explicit vpServer(const int &port); vpServer(const std::string &adress_serv, const int &port_serv); - virtual ~vpServer(); + virtual ~vpServer() override; bool checkForConnections(); diff --git a/modules/core/include/visp3/core/vpSphere.h b/modules/core/include/visp3/core/vpSphere.h index 4c818f7c5e..1992d8695c 100644 --- a/modules/core/include/visp3/core/vpSphere.h +++ b/modules/core/include/visp3/core/vpSphere.h @@ -32,9 +32,9 @@ */ /*! - \file vpSphere.h - \brief forward projection of a sphere -*/ + * \file vpSphere.h + * \brief forward projection of a sphere + */ #ifndef vpSphere_hh #define vpSphere_hh @@ -46,55 +46,54 @@ #include #include /*! 
- \class vpSphere - \ingroup group_core_geometry - \brief Class that defines a 3D sphere in the object frame and allows forward projection of a 3D sphere in the - camera frame and in the 2D image plane by perspective projection. - All the parameters must be set in meter. - - A sphere has the followings parameters: - - **in the object frame**: the 3D coordinates oX, oY, oZ of the center and radius R. These - parameters registered in vpForwardProjection::oP internal 4-dim vector are set using the constructors vpSphere(double - oX, double oY, double oZ, double R), vpSphere(const vpColVector &oP) or the functions setWorldCoordinates(double oX, - double oY, double oZ, double R) and setWorldCoordinates(const vpColVector &oP). To get theses parameters use get_oP(). - - - **in the camera frame**: the coordinates cX, cY, cZ of the center and radius R. These - parameters registered in vpTracker::cP internal 4-dim vector are computed using - changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const or changeFrame(const vpHomogeneousMatrix &cMo). - These parameters could be retrieved using getX(), getY(), getZ() and getR(). - To get theses parameters use get_cP(). - - - **in the image plane**: here we consider the parameters of the ellipse corresponding - to the perspective projection of the 3D sphere. The parameters are the ellipse centroid (x, y) - and n20, n11, n02 which are the second order centered moments of - the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where - \f$\mu_{ij}\f$ are the centered moments and a the area). - These parameters are registered in vpTracker::p internal 5-dim vector and computed using projection() and - projection(const vpColVector &cP, vpColVector &p) const. They could be retrieved using get_x(), get_y(), get_n20(), - get_n11() and get_n02(). They correspond to 2D normalized sphere parameters with values expressed in meters. - To get theses parameters use get_p(). 
-*/ + * \class vpSphere + * \ingroup group_core_geometry + * \brief Class that defines a 3D sphere in the object frame and allows forward projection of a 3D sphere in the + * camera frame and in the 2D image plane by perspective projection. + * All the parameters must be set in meter. + * + * A sphere has the followings parameters: + * - **in the object frame**: the 3D coordinates oX, oY, oZ of the center and radius R. These + *. parameters registered in vpForwardProjection::oP internal 4-dim vector are set using the constructors vpSphere(double + *. oX, double oY, double oZ, double R), vpSphere(const vpColVector &oP) or the functions setWorldCoordinates(double oX, + *. double oY, double oZ, double R) and setWorldCoordinates(const vpColVector &oP). To get theses parameters use get_oP(). + * + * - **in the camera frame**: the coordinates cX, cY, cZ of the center and radius R. These + *. parameters registered in vpTracker::cP internal 4-dim vector are computed using + *. changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const or changeFrame(const vpHomogeneousMatrix &cMo). + *. These parameters could be retrieved using getX(), getY(), getZ() and getR(). + *. To get theses parameters use get_cP(). + * + * - **in the image plane**: here we consider the parameters of the ellipse corresponding + *. to the perspective projection of the 3D sphere. The parameters are the ellipse centroid (x, y) + *. and n20, n11, n02 which are the second order centered moments of + *. the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where + *. \f$\mu_{ij}\f$ are the centered moments and a the area). + *. These parameters are registered in vpTracker::p internal 5-dim vector and computed using projection() and + *. projection(const vpColVector &cP, vpColVector &p) const. They could be retrieved using get_x(), get_y(), get_n20(), + *. get_n11() and get_n02(). They correspond to 2D normalized sphere parameters with values expressed in meters. + *. 
To get theses parameters use get_p(). + */ class VISP_EXPORT vpSphere : public vpForwardProjection { public: vpSphere(); explicit vpSphere(const vpColVector &oP); vpSphere(double oX, double oY, double oZ, double R); - virtual ~vpSphere(); - void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const; - void changeFrame(const vpHomogeneousMatrix &cMo); + void changeFrame(const vpHomogeneousMatrix &cMo, vpColVector &cP) const override; + void changeFrame(const vpHomogeneousMatrix &cMo) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, - unsigned int thickness = 1); + unsigned int thickness = 1) override; void display(const vpImage &I, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &color = vpColor::green, unsigned int thickness = 1); + const vpColor &color = vpColor::green, unsigned int thickness = 1) override; void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &color = vpColor::green, unsigned int thickness = 1); - vpSphere *duplicate() const; + vpSphere *duplicate() const override; double get_x() const { return p[0]; } double get_y() const { return p[1]; } @@ -108,20 +107,20 @@ class VISP_EXPORT vpSphere : public vpForwardProjection double getZ() const { return cP[2]; } double getR() const { return cP[3]; } - void projection(); - void projection(const vpColVector &cP, vpColVector &p) const; + void projection() override; + void projection(const vpColVector &cP, vpColVector &p) const override; - void setWorldCoordinates(const vpColVector &oP); + void setWorldCoordinates(const vpColVector &oP) override; void setWorldCoordinates(double oX, double oY, double oZ, double R); protected: - void init(); + void init() override; public: #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) /*! 
- @name Deprecated functions - */ + * @name Deprecated functions + */ //@{ /*! * \deprecated You should rather use get_n20(). diff --git a/modules/core/include/visp3/core/vpSubColVector.h b/modules/core/include/visp3/core/vpSubColVector.h index 58f9ec9d79..99fa0579b2 100644 --- a/modules/core/include/visp3/core/vpSubColVector.h +++ b/modules/core/include/visp3/core/vpSubColVector.h @@ -37,40 +37,37 @@ #include /*! - \file vpSubColVector.h - - \brief Definition of the vpSubColVector class -*/ + * \file vpSubColVector.h + * + * \brief Definition of the vpSubColVector class + */ /*! - \class vpSubColVector - \ingroup group_core_matrices - This class provides a mask on a vpColVector. It has internally a - pointer to the parent vpColVector. - All properties of vpColVector are available with - a vpSubColVector. - - \author Jean Laneurit (IRISA - INRIA Rennes) - - \sa vpMatrix vpColvector vpRowVector -*/ + * \class vpSubColVector + * \ingroup group_core_matrices + * This class provides a mask on a vpColVector. It has internally a + * pointer to the parent vpColVector. + * All properties of vpColVector are available with + * a vpSubColVector. + * + * \sa vpMatrix vpColVector vpRowVector + */ class VISP_EXPORT vpSubColVector : public vpColVector { - private: - //! Copy constructor unavaible + //! Copy constructor unavailable vpSubColVector(const vpSubColVector & /* m */); protected: - //! Number of row of parent vpColvector at initialization - unsigned int pRowNum; - //! Parent vpColvector - vpColVector *parent; + //! Number of row of parent vpColVector at initialization + unsigned int m_pRowNum; + //! 
Parent vpColVector + vpColVector *m_parent; public: vpSubColVector(); vpSubColVector(vpColVector &v, const unsigned int &offset, const unsigned int &nrows); - virtual ~vpSubColVector(); + virtual ~vpSubColVector() override; void checkParentStatus() const; diff --git a/modules/core/include/visp3/core/vpSubMatrix.h b/modules/core/include/visp3/core/vpSubMatrix.h index 8cabf02d03..989cf9bf71 100644 --- a/modules/core/include/visp3/core/vpSubMatrix.h +++ b/modules/core/include/visp3/core/vpSubMatrix.h @@ -52,7 +52,7 @@ \author Jean Laneurit (IRISA - INRIA Rennes) - \sa vpMatrix vpColvector vpRowVector + \sa vpMatrix vpColVector vpRowVector */ class VISP_EXPORT vpSubMatrix : public vpMatrix { @@ -77,7 +77,7 @@ class VISP_EXPORT vpSubMatrix : public vpMatrix vpSubMatrix(vpMatrix &m, const unsigned int &row, const unsigned int &col, const unsigned int &nrows, const unsigned int &ncols); //! Destructor - virtual ~vpSubMatrix(); + virtual ~vpSubMatrix() override; //! Initialisation of vpMatrix void init(vpMatrix &m, const unsigned int &row, const unsigned int &col, const unsigned int &nrows, diff --git a/modules/core/include/visp3/core/vpSubRowVector.h b/modules/core/include/visp3/core/vpSubRowVector.h index 21660d7cf5..d5fc68e2b2 100644 --- a/modules/core/include/visp3/core/vpSubRowVector.h +++ b/modules/core/include/visp3/core/vpSubRowVector.h @@ -37,41 +37,37 @@ #include /*! - \file vpSubRowVector.h - - \brief Definition of the vpSubRowVector class -*/ + * \file vpSubRowVector.h + * + * \brief Definition of the vpSubRowVector class + */ /*! - \class vpSubRowVector - \ingroup group_core_matrices - This class provides a mask on a vpRowVector. It has internally a - pointer to the parent vpRowVector. - All properties of vpRowVector are available with - a vpSubRowVector. - - \author Jean Laneurit (IRISA - INRIA Rennes) - - \sa vpMatrix vpColvector vpRowVector -*/ - + * \class vpSubRowVector + * \ingroup group_core_matrices + * This class provides a mask on a vpRowVector. 
It has internally a + * pointer to the parent vpRowVector. + * All properties of vpRowVector are available with + * a vpSubRowVector. + * + * \sa vpMatrix vpColVector vpRowVector + */ class VISP_EXPORT vpSubRowVector : public vpRowVector { - private: - //! Copy constructor unavaible + //! Copy constructor unavailable vpSubRowVector(const vpSubRowVector & /* m */); protected: - //! Number of row of parent vpColvector at initialization - unsigned int pColNum; - //! Parent vpColvector - vpRowVector *parent; + //! Number of row of parent vpColVector at initialization + unsigned int m_pColNum; + //! Parent vpColVector + vpRowVector *m_parent; public: vpSubRowVector(); vpSubRowVector(vpRowVector &v, const unsigned int &offset, const unsigned int &ncols); - virtual ~vpSubRowVector(); + virtual ~vpSubRowVector() override; void checkParentStatus() const; diff --git a/modules/core/include/visp3/core/vpThetaUVector.h b/modules/core/include/visp3/core/vpThetaUVector.h index b940a6d4fa..fb442147ba 100644 --- a/modules/core/include/visp3/core/vpThetaUVector.h +++ b/modules/core/include/visp3/core/vpThetaUVector.h @@ -190,8 +190,6 @@ class VISP_EXPORT vpThetaUVector : public vpRotationVector explicit vpThetaUVector(const std::vector &tu); vpThetaUVector(double tux, double tuy, double tuz); - //! Destructor. - virtual ~vpThetaUVector(){} // convert an homogeneous matrix into Theta U vector vpThetaUVector buildFrom(const vpHomogeneousMatrix &M); diff --git a/modules/core/include/visp3/core/vpTracker.h b/modules/core/include/visp3/core/vpTracker.h index 25662d0f22..0d0d6bd860 100644 --- a/modules/core/include/visp3/core/vpTracker.h +++ b/modules/core/include/visp3/core/vpTracker.h @@ -35,46 +35,45 @@ #define vpTracker_H /*! - \file vpTracker.h - \brief Class that defines what is a generic tracker. -*/ + * \file vpTracker.h + * \brief Class that defines what is a generic tracker. + */ #include #include #include /*! 
- \class vpTracker - \ingroup group_core_trackers - \brief Class that defines what is a feature generic tracker. - - A tracker is able to track features with parameters expressed in: - - in the camera frame \e cP. These parameters are located in the public - attribute vpTracker::cP. - - in the image plane \e p. These parameters are located in the public - attribute vpTracker::p. They correspond to normalized coordinates - of the feature expressed in meters. - -*/ + * \class vpTracker + * \ingroup group_core_trackers + * \brief Class that defines what is a feature generic tracker. + * + * A tracker is able to track features with parameters expressed in: + * - in the camera frame \e cP. These parameters are located in the public + * attribute vpTracker::cP. + * - in the image plane \e p. These parameters are located in the public + * attribute vpTracker::p. They correspond to normalized coordinates + * of the feature expressed in meters. + */ class VISP_EXPORT vpTracker { public: /** @name Public Attributes Inherited from vpTracker */ //@{ /*! - Feature coordinates expressed in the image plane \e p. They correspond - to 2D normalized coordinates expressed in meters. - */ + * Feature coordinates expressed in the image plane \e p. They correspond + * to 2D normalized coordinates expressed in meters. + */ vpColVector p; /*! - Feature coordinates expressed in the camera frame \e cP. - */ + * Feature coordinates expressed in the camera frame \e cP. + */ vpColVector cP; /*! - Flag used to indicate if the feature parameters \e cP expressed - in the camera frame are available. - */ + * Flag used to indicate if the feature parameters \e cP expressed + * in the camera frame are available. 
+ */ bool cPAvailable; //@} diff --git a/modules/core/include/visp3/core/vpTranslationVector.h b/modules/core/include/visp3/core/vpTranslationVector.h index a48d24b509..d8576c2ab0 100644 --- a/modules/core/include/visp3/core/vpTranslationVector.h +++ b/modules/core/include/visp3/core/vpTranslationVector.h @@ -117,7 +117,7 @@ class VISP_EXPORT vpTranslationVector : public vpArray2D Default constructor. The translation vector is initialized to zero. */ - vpTranslationVector() : vpArray2D(3, 1), m_index(0) { }; + vpTranslationVector() : vpArray2D(3, 1), m_index(0) { } vpTranslationVector(double tx, double ty, double tz); vpTranslationVector(const vpTranslationVector &tv); explicit vpTranslationVector(const vpHomogeneousMatrix &M); @@ -173,7 +173,7 @@ class VISP_EXPORT vpTranslationVector : public vpArray2D (void)ncols; (void)flagNullify; throw(vpException(vpException::fatalError, "Cannot resize a translation vector")); - }; + } void set(double tx, double ty, double tz); diff --git a/modules/core/include/visp3/core/vpUDPClient.h b/modules/core/include/visp3/core/vpUDPClient.h index f48b31a7ef..75d36acd44 100644 --- a/modules/core/include/visp3/core/vpUDPClient.h +++ b/modules/core/include/visp3/core/vpUDPClient.h @@ -52,117 +52,115 @@ #define VP_MAX_UDP_PAYLOAD 508 /*! - \class vpUDPClient - - \ingroup group_core_com_ethernet - - \brief This class implements a basic (IPv4) User Datagram Protocol (UDP) -client. - - More information here, - here - or here: -
- This User Datagram Protocol (UDP) is defined to make available a - datagram mode of packet-switched computer communication in the - environment of an interconnected set of computer networks. This - protocol assumes that the Internet Protocol (IP) [1] is used as the - underlying protocol. - - This protocol provides a procedure for application programs to send - messages to other programs with a minimum of protocol mechanism. The - protocol is transaction oriented, and delivery and duplicate protection - are not guaranteed. Applications requiring ordered reliable delivery of - streams of data should use the Transmission Control Protocol (TCP) [2]. -
- - Example of a client's code, sending a basic message and receiving the - server answer: - - \code -#include -#include -#include - -int main() { - try { - std::string servername = "127.0.0.1"; - unsigned int port = 50037; - vpUDPClient client(servername, port); - - while (true) { - std::cout << "Enter the message to send:" << std::endl; - std::string msg = ""; - std::getline(std::cin, msg); - if (client.send(msg) != (int) msg.size()) - std::cerr << "Error client.send()!" << std::endl; - if (client.receive(msg)) - std::cout << "Receive from the server: " << msg << std::endl; - } - return EXIT_SUCCESS; - } catch (const vpException &e) { - std::cerr << "Catch an exception: " << e.what() << std::endl; - return EXIT_FAILURE; - } -} - \endcode - - If you want to send a complex data type, you can either send the ASCII - representation or send directly the byte data. In the last case, you should - have to handle that both the server and the client have the same data type - representation. Be careful also with the endianness of the network / host. 
- - Here an example using a structure of data, assuming that both the server and - the client have the same architecture (probably you should write your own - serialization / deserialization functions for the data you want to send / - receive): - - \code -#include -#include -#include -#include - -struct vpDataType_t { - double double_val; - int int_val; - vpDataType_t() : double_val(0.0), int_val(0) {} - vpDataType_t(double dbl, int i) : double_val(dbl), int_val(i) {} -}; - -int main() { - try { - std::string servername = "127.0.0.1"; - unsigned int port = 50037; - vpUDPClient client(servername, port); - vpDataType_t data_type(1234.56789, 123450); - char data[sizeof(data_type.double_val)+sizeof(data_type.int_val)]; - - memcpy(data, &data_type.double_val, sizeof(data_type.double_val)); - memcpy(data+sizeof(data_type.double_val), &data_type.int_val, sizeof(data_type.int_val)); - - std::string msg(data, sizeof(data_type.double_val)+sizeof(data_type.int_val)); - if (client.send(msg) != (int) sizeof(data_type.double_val)+sizeof(data_type.int_val)) - std::cerr << "Error client.send()!" << std::endl; - if (client.receive(msg)) { - data_type.double_val = *reinterpret_cast(msg.c_str()); - data_type.int_val - = *reinterpret_cast(msg.c_str()+sizeof(data_type.double_val)); - std::cout << "Receive from the server double_val: " << data_type.double_val - << " ; int_val: " << data_type.int_val << std::endl; - } - return EXIT_SUCCESS; - } catch (const vpException &e) { - std::cerr << "Catch an exception: " << e.what() << std::endl; - return EXIT_FAILURE; - } -} - \endcode - - \sa vpUDPServer -*/ + * \class vpUDPClient + * + * \ingroup group_core_com_ethernet + * + * \brief This class implements a basic (IPv4) User Datagram Protocol (UDP) client. + * + * More information here, + * here + * or + * here: + *
+ * This User Datagram Protocol (UDP) is defined to make available a + * datagram mode of packet-switched computer communication in the + * environment of an interconnected set of computer networks. This + * protocol assumes that the Internet Protocol (IP) [1] is used as the + * underlying protocol. + * + * This protocol provides a procedure for application programs to send + * messages to other programs with a minimum of protocol mechanism. The + * protocol is transaction oriented, and delivery and duplicate protection + * are not guaranteed. Applications requiring ordered reliable delivery of + * streams of data should use the Transmission Control Protocol (TCP) [2]. + *
+ * + * Example of a client's code, sending a basic message and receiving the + * server answer: + * + * \code + * #include + * #include + * #include + * + * int main() { + * try { + * std::string servername = "127.0.0.1"; + * unsigned int port = 50037; + * vpUDPClient client(servername, port); + * + * while (true) { + * std::cout << "Enter the message to send:" << std::endl; + * std::string msg = ""; + * std::getline(std::cin, msg); + * if (client.send(msg) != (int) msg.size()) + * std::cerr << "Error client.send()!" << std::endl; + * if (client.receive(msg)) + * std::cout << "Receive from the server: " << msg << std::endl; + * } + * return EXIT_SUCCESS; + * } catch (const vpException &e) { + * std::cerr << "Catch an exception: " << e.what() << std::endl; + * return EXIT_FAILURE; + * } + * } + * \endcode + * + * If you want to send a complex data type, you can either send the ASCII + * representation or send directly the byte data. In the last case, you should + * have to handle that both the server and the client have the same data type + * representation. Be careful also with the endianness of the network / host. 
+ * + * Here an example using a structure of data, assuming that both the server and + * the client have the same architecture (probably you should write your own + * serialization / deserialization functions for the data you want to send / + * receive): + * + * \code + * #include + * #include + * #include + * #include + * + * struct vpDataType_t { + * double double_val; + * int int_val; + * vpDataType_t() : double_val(0.0), int_val(0) {} + * vpDataType_t(double dbl, int i) : double_val(dbl), int_val(i) {} + * }; + * + * int main() { + * try { + * std::string servername = "127.0.0.1"; + * unsigned int port = 50037; + * vpUDPClient client(servername, port); + * vpDataType_t data_type(1234.56789, 123450); + * char data[sizeof(data_type.double_val)+sizeof(data_type.int_val)]; + * + * memcpy(data, &data_type.double_val, sizeof(data_type.double_val)); + * memcpy(data+sizeof(data_type.double_val), &data_type.int_val, sizeof(data_type.int_val)); + * + * std::string msg(data, sizeof(data_type.double_val)+sizeof(data_type.int_val)); + * if (client.send(msg) != (int) sizeof(data_type.double_val)+sizeof(data_type.int_val)) + * std::cerr << "Error client.send()!" 
<< std::endl; + * if (client.receive(msg)) { + * data_type.double_val = *reinterpret_cast(msg.c_str()); + * data_type.int_val + * = *reinterpret_cast(msg.c_str()+sizeof(data_type.double_val)); + * std::cout << "Receive from the server double_val: " << data_type.double_val + * << " ; int_val: " << data_type.int_val << std::endl; + * } + * return EXIT_SUCCESS; + * } catch (const vpException &e) { + * std::cerr << "Catch an exception: " << e.what() << std::endl; + * return EXIT_FAILURE; + * } + * } + * \endcode + * + * \sa vpUDPServer + */ class VISP_EXPORT vpUDPClient { public: diff --git a/modules/core/include/visp3/core/vpVelocityTwistMatrix.h b/modules/core/include/visp3/core/vpVelocityTwistMatrix.h index 55530abdd6..d8a3ccf0ab 100644 --- a/modules/core/include/visp3/core/vpVelocityTwistMatrix.h +++ b/modules/core/include/visp3/core/vpVelocityTwistMatrix.h @@ -178,11 +178,6 @@ class VISP_EXPORT vpVelocityTwistMatrix : public vpArray2D vpVelocityTwistMatrix(const vpRotationMatrix &R); vpVelocityTwistMatrix(const vpThetaUVector &thetau); - /*! - Destructor. - */ - virtual ~vpVelocityTwistMatrix(){}; - vpVelocityTwistMatrix buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R); vpVelocityTwistMatrix buildFrom(const vpTranslationVector &t, const vpThetaUVector &thetau); vpVelocityTwistMatrix buildFrom(const vpHomogeneousMatrix &M, bool full = true); @@ -217,7 +212,7 @@ class VISP_EXPORT vpVelocityTwistMatrix : public vpArray2D (void)ncols; (void)flagNullify; throw(vpException(vpException::fatalError, "Cannot resize a velocity twist matrix")); - }; + } #if defined(VISP_BUILD_DEPRECATED_FUNCTIONS) /*! @@ -228,7 +223,7 @@ class VISP_EXPORT vpVelocityTwistMatrix : public vpArray2D \deprecated Provided only for compat with previous releases. This function does nothing. */ - vp_deprecated void init(){}; + vp_deprecated void init() { } /*! \deprecated You should rather use eye(). 
*/ diff --git a/modules/core/src/camera/vpCameraParameters.cpp b/modules/core/src/camera/vpCameraParameters.cpp index 1ef31179eb..8993f054d2 100644 --- a/modules/core/src/camera/vpCameraParameters.cpp +++ b/modules/core/src/camera/vpCameraParameters.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,12 @@ * * Description: * Camera intrinsic parameters. - * - * Authors: - * Anthony Saunier - * -*****************************************************************************/ + */ /*! \file vpCameraParameters.cpp \brief Definition of the vpCameraParameters class member functions. Class vpCameraParameters define the camera intrinsic parameters - */ #include @@ -60,407 +54,418 @@ const double vpCameraParameters::DEFAULT_V0_PARAMETER = 144.0; const double vpCameraParameters::DEFAULT_KUD_PARAMETER = 0.0; const double vpCameraParameters::DEFAULT_KDU_PARAMETER = 0.0; const vpCameraParameters::vpCameraParametersProjType vpCameraParameters::DEFAULT_PROJ_TYPE = - vpCameraParameters::perspectiveProjWithoutDistortion; +vpCameraParameters::perspectiveProjWithoutDistortion; /*! - Default constructor. - By default, a perspective projection without distortion model is set. - - \sa init() -*/ + * Default constructor. + * By default, a perspective projection without distortion model is set. + * + * \sa init() + */ vpCameraParameters::vpCameraParameters() - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. 
/ DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { init(); } /*! - Copy constructor + * Copy constructor */ vpCameraParameters::vpCameraParameters(const vpCameraParameters &c) - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. / DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { init(c); } /*! - Constructor for perspective projection without distortion model - - \param cam_px,cam_py : pixel size - \param cam_u0,cam_v0 : principal points - + * Constructor for perspective projection without distortion model + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. 
*/ vpCameraParameters::vpCameraParameters(double cam_px, double cam_py, double cam_u0, double cam_v0) - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. / DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { initPersProjWithoutDistortion(cam_px, cam_py, cam_u0, cam_v0); } /*! - Constructor for perspective projection with distortion model - - \param cam_px,cam_py : pixel size - \param cam_u0,cam_v0 : principal points - \param cam_kud : undistorted to distorted radial distortion - \param cam_kdu : distorted to undistorted radial distortion - + * Constructor for perspective projection with distortion model + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * \param cam_kud : Undistorted to distorted radial distortion. + * \param cam_kdu : Distorted to undistorted radial distortion. 
*/ vpCameraParameters::vpCameraParameters(double cam_px, double cam_py, double cam_u0, double cam_v0, double cam_kud, double cam_kdu) - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. / DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { initPersProjWithDistortion(cam_px, cam_py, cam_u0, cam_v0, cam_kud, cam_kdu); } /*! - Constructor for projection with Kannala-Brandt distortion model - - \param cam_px,cam_py : pixel size - \param cam_u0,cam_v0 : principal points - \param coefficients : distortion model coefficients - + * Constructor for projection with Kannala-Brandt distortion model + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * \param coefficients : distortion model coefficients */ vpCameraParameters::vpCameraParameters(double cam_px, double cam_py, double cam_u0, double cam_v0, const std::vector &coefficients) - : px(DEFAULT_PX_PARAMETER), py(DEFAULT_PY_PARAMETER), u0(DEFAULT_U0_PARAMETER), v0(DEFAULT_V0_PARAMETER), - kud(DEFAULT_KUD_PARAMETER), kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), width(0), height(0), isFov(false), - m_hFovAngle(0), m_vFovAngle(0), fovNormals(), inv_px(1. / DEFAULT_PX_PARAMETER), inv_py(1. 
/ DEFAULT_PY_PARAMETER), - projModel(DEFAULT_PROJ_TYPE) + : m_px(DEFAULT_PX_PARAMETER), m_py(DEFAULT_PY_PARAMETER), m_u0(DEFAULT_U0_PARAMETER), m_v0(DEFAULT_V0_PARAMETER), + m_kud(DEFAULT_KUD_PARAMETER), m_kdu(DEFAULT_KDU_PARAMETER), m_dist_coefs(), m_width(0), m_height(0), m_isFov(false), + m_hFovAngle(0), m_vFovAngle(0), m_fovNormals(), m_inv_px(1. / DEFAULT_PX_PARAMETER), m_inv_py(1. / DEFAULT_PY_PARAMETER), + m_projModel(DEFAULT_PROJ_TYPE) { initProjWithKannalaBrandtDistortion(cam_px, cam_py, cam_u0, cam_v0, coefficients); } /*! - \brief basic initialization with the default parameters -*/ + * \brief Basic initialization with the default parameters. + */ void vpCameraParameters::init() { - if (fabs(this->px) < 1e-6) { + if (fabs(this->m_px) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - if (fabs(this->py) < 1e-6) { + if (fabs(this->m_py) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - this->inv_px = 1. / this->px; - this->inv_py = 1. / this->py; + this->m_inv_px = 1. / this->m_px; + this->m_inv_py = 1. / this->m_py; } /*! - Initialization with specific parameters using perpective projection without - distortion model. - \param cam_px,cam_py : the ratio between the focal length and the size of a -pixel. \param cam_u0,cam_v0 : principal point coordinates in pixels. 
- - The following sample code shows how to use this function: - \code -#include -#include - -int main() -{ - vpImage I(480, 640); - double u0 = I.getWidth() / 2.; - double v0 = I.getHeight() / 2.; - double px = 600; - double py = 600; - vpCameraParameters cam; - cam.initPersProjWithoutDistortion(px, py, u0, v0); - cam.computeFov(I.getWidth(), I.getHeight()); - std::cout << cam << std::endl; - std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) - << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) - << " degrees)" << std::endl; -} - \endcode - It produces the following output: - \code -Camera parameters for perspective projection without distortion: - px = 600 py = 600 - u0 = 320 v0 = 240 - -Field of view (horizontal: 56.145 and vertical: 43.6028 degrees) - \endcode - + * Initialization with specific parameters using perspective projection without + * distortion model. + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. 
+ * + * The following sample code shows how to use this function: + * \code + * #include + * #include + * + * int main() + * { + * vpImage I(480, 640); + * double u0 = I.getWidth() / 2.; + * double v0 = I.getHeight() / 2.; + * double px = 600; + * double py = 600; + * vpCameraParameters cam; + * cam.initPersProjWithoutDistortion(px, py, u0, v0); + * cam.computeFov(I.getWidth(), I.getHeight()); + * std::cout << cam << std::endl; + * std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) + * << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) + * << " degrees)" << std::endl; + * } + * \endcode + * It produces the following output: + * \code + * Camera parameters for perspective projection without distortion: + * px = 600 py = 600 + * u0 = 320 v0 = 240 + * + * Field of view (horizontal: 56.145 and vertical: 43.6028 degrees) + * \endcode */ void vpCameraParameters::initPersProjWithoutDistortion(double cam_px, double cam_py, double cam_u0, double cam_v0) { - this->projModel = vpCameraParameters::perspectiveProjWithoutDistortion; + this->m_projModel = vpCameraParameters::perspectiveProjWithoutDistortion; - this->px = cam_px; - this->py = cam_py; - this->u0 = cam_u0; - this->v0 = cam_v0; - this->kud = 0; - this->kdu = 0; + this->m_px = cam_px; + this->m_py = cam_py; + this->m_u0 = cam_u0; + this->m_v0 = cam_v0; + this->m_kud = 0; + this->m_kdu = 0; this->m_dist_coefs.clear(); - if (fabs(px) < 1e-6) { + if (fabs(m_px) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - if (fabs(py) < 1e-6) { + if (fabs(m_py) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter py = 0")); } - this->inv_px = 1. / px; - this->inv_py = 1. / py; + this->m_inv_px = 1. / m_px; + this->m_inv_py = 1. / m_py; } /*! - Initialization with specific parameters using perpective projection with - distortion model. - \param cam_px,cam_py : the ratio between the focal length and the size of a pixel. 
- \param cam_u0,cam_v0 : principal points coordinates in pixels. - \param cam_kud : undistorted to distorted radial distortion. - \param cam_kdu : distorted to undistorted radial distortion. - - The following sample code shows how to use this function: - \code -#include -#include - -int main() -{ - vpImage I(480, 640); - double u0 = I.getWidth() / 2.; - double v0 = I.getHeight() / 2.; - double px = 600; - double py = 600; - double kud = -0.19; - double kdu = 0.20; - vpCameraParameters cam; - cam.initPersProjWithDistortion(px, py, u0, v0, kud, kdu); - cam.computeFov(I.getWidth(), I.getHeight()); - std::cout << cam << std::endl; - std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) - << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) - << " degrees)" << std::endl; -} - \endcode - It produces the following output: - \code -Camera parameters for perspective projection with distortion: - px = 600 py = 600 - u0 = 320 v0 = 240 - kud = -0.19 - kdu = 0.2 - -Field of view (horizontal: 56.14497387 and vertical: 43.60281897 degrees) -\endcode -*/ + * Initialization with specific parameters using perspective projection with + * distortion model. + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * \param cam_kud : Undistorted to distorted radial distortion. + * \param cam_kdu : Distorted to undistorted radial distortion. 
+ * + * The following sample code shows how to use this function: + * \code + * #include + * #include + * + * int main() + * { + * vpImage I(480, 640); + * double u0 = I.getWidth() / 2.; + * double v0 = I.getHeight() / 2.; + * double px = 600; + * double py = 600; + * double kud = -0.19; + * double kdu = 0.20; + * vpCameraParameters cam; + * cam.initPersProjWithDistortion(px, py, u0, v0, kud, kdu); + * cam.computeFov(I.getWidth(), I.getHeight()); + * std::cout << cam << std::endl; + * std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) + * << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) + * << " degrees)" << std::endl; + * } + * \endcode + * It produces the following output: + * \code + * Camera parameters for perspective projection with distortion: + * px = 600 py = 600 + * u0 = 320 v0 = 240 + * kud = -0.19 + * kdu = 0.2 + * + * Field of view (horizontal: 56.14497387 and vertical: 43.60281897 degrees) + * \endcode + */ void vpCameraParameters::initPersProjWithDistortion(double cam_px, double cam_py, double cam_u0, double cam_v0, double cam_kud, double cam_kdu) { - this->projModel = vpCameraParameters::perspectiveProjWithDistortion; - - this->px = cam_px; - this->py = cam_py; - this->u0 = cam_u0; - this->v0 = cam_v0; - this->kud = cam_kud; - this->kdu = cam_kdu; + this->m_projModel = vpCameraParameters::perspectiveProjWithDistortion; + + this->m_px = cam_px; + this->m_py = cam_py; + this->m_u0 = cam_u0; + this->m_v0 = cam_v0; + this->m_kud = cam_kud; + this->m_kdu = cam_kdu; this->m_dist_coefs.clear(); - if (fabs(px) < 1e-6) { + if (fabs(m_px) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - if (fabs(py) < 1e-6) { + if (fabs(m_py) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - this->inv_px = 1. / px; - this->inv_py = 1. / py; + this->m_inv_px = 1. / m_px; + this->m_inv_py = 1. / m_py; } /*! 
- Initialization with specific parameters using Kannala-Brandt distortion model - \param cam_px,cam_py : The ratio between the focal length and the size of a pixel. - \param cam_u0,cam_v0 : Principal points coordinates in pixels. - \param coefficients : Distortion coefficients. -*/ + * Initialization with specific parameters using Kannala-Brandt distortion model + * + * \param cam_px : Pixel size along x axis (horizontal). + * \param cam_py : Pixel size along y axis (vertical) + * \param cam_u0 : Principal point coordinate in pixel along x. + * \param cam_v0 : Principal point coordinate in pixel along y. + * \param coefficients : Distortion coefficients. + */ void vpCameraParameters::initProjWithKannalaBrandtDistortion(double cam_px, double cam_py, double cam_u0, double cam_v0, const std::vector &coefficients) { - this->projModel = vpCameraParameters::ProjWithKannalaBrandtDistortion; + this->m_projModel = vpCameraParameters::ProjWithKannalaBrandtDistortion; - this->px = cam_px; - this->py = cam_py; - this->u0 = cam_u0; - this->v0 = cam_v0; + this->m_px = cam_px; + this->m_py = cam_py; + this->m_u0 = cam_u0; + this->m_v0 = cam_v0; - this->kud = 0.0; - this->kdu = 0.0; + this->m_kud = 0.0; + this->m_kdu = 0.0; - if (fabs(px) < 1e-6) { + if (fabs(m_px) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - if (fabs(py) < 1e-6) { + if (fabs(m_py) < 1e-6) { throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0")); } - this->inv_px = 1. / px; - this->inv_py = 1. / py; + this->m_inv_px = 1. / m_px; + this->m_inv_py = 1. / m_py; this->m_dist_coefs = coefficients; } /*! - destructor - - nothing much to destroy... -*/ -vpCameraParameters::~vpCameraParameters() {} + * Destructor that does nothing. + */ +vpCameraParameters::~vpCameraParameters() { } /*! - initialization from another vpCameraParameters object -*/ + * Initialization from another vpCameraParameters object. 
+ */ void vpCameraParameters::init(const vpCameraParameters &c) { *this = c; } /*! - initialise the camera from a calibration matrix. - Using a calibration matrix leads to a camera without distortion - - The K matrix in parameters must be like: - - \f$ K = \left(\begin{array}{ccc} - p_x & 0 & u_0 \\ - 0 & p_y & v_0 \\ - 0 & 0 & 1 - \end{array} \right) \f$ - - \param _K : the 3by3 calibration matrix -*/ -void vpCameraParameters::initFromCalibrationMatrix(const vpMatrix &_K) + * Initialise the camera from a calibration matrix. + * Using a calibration matrix leads to a camera without distortion. + * + * The K matrix in parameters must be like: + * + * \f$ K = \left(\begin{array}{ccc} + * p_x & 0 & u_0 \\ + * 0 & p_y & v_0 \\ + * 0 & 0 & 1 + * \end{array} \right) \f$ + * + * \param K : the 3-by-3 calibration matrix + */ +void vpCameraParameters::initFromCalibrationMatrix(const vpMatrix &K) { - if (_K.getRows() != 3 || _K.getCols() != 3) { + if (K.getRows() != 3 || K.getCols() != 3) { throw vpException(vpException::dimensionError, "bad size for calibration matrix"); } - if (std::fabs(_K[2][2] - 1.0) > std::numeric_limits::epsilon()) { + if (std::fabs(K[2][2] - 1.0) > std::numeric_limits::epsilon()) { throw vpException(vpException::badValue, "bad value: K[2][2] must be equal to 1"); } - initPersProjWithoutDistortion(_K[0][0], _K[1][1], _K[0][2], _K[1][2]); + initPersProjWithoutDistortion(K[0][0], K[1][1], K[0][2], K[1][2]); } /*! - Initialize the camera model without distortion from the image dimension and -the camera field of view. \param w : Image width. \param h : Image height. - \param hfov : Camera horizontal field of view angle expressed in radians. - \param vfov : Camera vertical field of view angle expressed in radians. 
- - The following sample code shows how to use this function: - \code -#include -#include - -int main() -{ - vpImage I(480, 640); - vpCameraParameters cam; - double hfov = vpMath::rad(56); - double vfov = vpMath::rad(43); - cam.initFromFov(I.getWidth(), I.getHeight(), hfov, vfov); - - std::cout << cam << std::endl; - std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) - << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) << " degrees)" << std::endl; -} - \endcode - It produces the following output: - \code -Camera parameters for perspective projection without distortion: - px = 601.832 py = 609.275 - u0 = 320 v0 = 240 - -Field of view (horizontal: 56 and vertical: 43 degrees) - \endcode + * Initialize the camera model without distortion from the image dimension and + * the camera field of view. + * \param w : Image width. + * \param h : Image height. + * \param hfov : Camera horizontal field of view angle expressed in radians. + * \param vfov : Camera vertical field of view angle expressed in radians. 
+ * + * The following sample code shows how to use this function: + * \code + * #include + * #include + * + * int main() + * { + * vpImage I(480, 640); + * vpCameraParameters cam; + * double hfov = vpMath::rad(56); + * double vfov = vpMath::rad(43); + * cam.initFromFov(I.getWidth(), I.getHeight(), hfov, vfov); + * + * std::cout << cam << std::endl; + * std::cout << "Field of view (horizontal: " << vpMath::deg(cam.getHorizontalFovAngle()) + * << " and vertical: " << vpMath::deg(cam.getVerticalFovAngle()) << " degrees)" << std::endl; + * } + * \endcode + * It produces the following output: + * \code + * Camera parameters for perspective projection without distortion: + * px = 601.832 py = 609.275 + * u0 = 320 v0 = 240 + * + * Field of view (horizontal: 56 and vertical: 43 degrees) + * \endcode */ void vpCameraParameters::initFromFov(const unsigned int &w, const unsigned int &h, const double &hfov, const double &vfov) { - projModel = vpCameraParameters::perspectiveProjWithoutDistortion; - u0 = (double)w / 2.; - v0 = (double)h / 2.; - px = u0 / tan(hfov / 2); - py = v0 / tan(vfov / 2); - kud = 0; - kdu = 0; - inv_px = 1. / px; - inv_py = 1. / py; + m_projModel = vpCameraParameters::perspectiveProjWithoutDistortion; + m_u0 = (double)w / 2.; + m_v0 = (double)h / 2.; + m_px = m_u0 / tan(hfov / 2); + m_py = m_v0 / tan(vfov / 2); + m_kud = 0; + m_kdu = 0; + m_inv_px = 1. / m_px; + m_inv_py = 1. / m_py; computeFov(w, h); } /*! - copy operator + * Copy operator. 
*/ vpCameraParameters &vpCameraParameters::operator=(const vpCameraParameters &cam) { - projModel = cam.projModel; - px = cam.px; - py = cam.py; - u0 = cam.u0; - v0 = cam.v0; - kud = cam.kud; - kdu = cam.kdu; + m_projModel = cam.m_projModel; + m_px = cam.m_px; + m_py = cam.m_py; + m_u0 = cam.m_u0; + m_v0 = cam.m_v0; + m_kud = cam.m_kud; + m_kdu = cam.m_kdu; m_dist_coefs = cam.m_dist_coefs; - inv_px = cam.inv_px; - inv_py = cam.inv_py; + m_inv_px = cam.m_inv_px; + m_inv_py = cam.m_inv_py; - isFov = cam.isFov; + m_isFov = cam.m_isFov; m_hFovAngle = cam.m_hFovAngle; m_vFovAngle = cam.m_vFovAngle; - width = cam.width; - height = cam.height; - fovNormals = cam.fovNormals; + m_width = cam.m_width; + m_height = cam.m_height; + m_fovNormals = cam.m_fovNormals; return *this; } /*! - True if the two objects are absolutely identical. + * True if the two objects are absolutely identical. */ bool vpCameraParameters::operator==(const vpCameraParameters &c) const { - if (projModel != c.projModel) + if (m_projModel != c.m_projModel) return false; - if (!vpMath::equal(px, c.px, std::numeric_limits::epsilon()) || - !vpMath::equal(py, c.py, std::numeric_limits::epsilon()) || - !vpMath::equal(u0, c.u0, std::numeric_limits::epsilon()) || - !vpMath::equal(v0, c.v0, std::numeric_limits::epsilon()) || - !vpMath::equal(kud, c.kud, std::numeric_limits::epsilon()) || - !vpMath::equal(kdu, c.kdu, std::numeric_limits::epsilon()) || - !vpMath::equal(inv_px, c.inv_px, std::numeric_limits::epsilon()) || - !vpMath::equal(inv_py, c.inv_py, std::numeric_limits::epsilon())) + if (!vpMath::equal(m_px, c.m_px, std::numeric_limits::epsilon()) || + !vpMath::equal(m_py, c.m_py, std::numeric_limits::epsilon()) || + !vpMath::equal(m_u0, c.m_u0, std::numeric_limits::epsilon()) || + !vpMath::equal(m_v0, c.m_v0, std::numeric_limits::epsilon()) || + !vpMath::equal(m_kud, c.m_kud, std::numeric_limits::epsilon()) || + !vpMath::equal(m_kdu, c.m_kdu, std::numeric_limits::epsilon()) || + !vpMath::equal(m_inv_px, 
c.m_inv_px, std::numeric_limits::epsilon()) || + !vpMath::equal(m_inv_py, c.m_inv_py, std::numeric_limits::epsilon())) return false; - if(m_dist_coefs.size() != c.m_dist_coefs.size()) + if (m_dist_coefs.size() != c.m_dist_coefs.size()) return false; for (unsigned int i = 0; i < m_dist_coefs.size(); i++) if (!vpMath::equal(m_dist_coefs[i], c.m_dist_coefs[i], std::numeric_limits::epsilon())) return false; - if (isFov != c.isFov || !vpMath::equal(m_hFovAngle, c.m_hFovAngle, std::numeric_limits::epsilon()) || - !vpMath::equal(m_vFovAngle, c.m_vFovAngle, std::numeric_limits::epsilon()) || width != c.width || - height != c.height) + if (m_isFov != c.m_isFov || !vpMath::equal(m_hFovAngle, c.m_hFovAngle, std::numeric_limits::epsilon()) || + !vpMath::equal(m_vFovAngle, c.m_vFovAngle, std::numeric_limits::epsilon()) || m_width != c.m_width || + m_height != c.m_height) return false; - if (fovNormals.size() != c.fovNormals.size()) + if (m_fovNormals.size() != c.m_fovNormals.size()) return false; - std::vector::const_iterator it1 = fovNormals.begin(); - std::vector::const_iterator it2 = c.fovNormals.begin(); - for (; it1 != fovNormals.end() && it2 != c.fovNormals.end(); ++it1, ++it2) { + std::vector::const_iterator it1 = m_fovNormals.begin(); + std::vector::const_iterator it2 = c.m_fovNormals.begin(); + for (; it1 != m_fovNormals.end() && it2 != c.m_fovNormals.end(); ++it1, ++it2) { if (*it1 != *it2) return false; } @@ -469,30 +474,30 @@ bool vpCameraParameters::operator==(const vpCameraParameters &c) const } /*! - False if the two objects are absolutely identical. + * False if the two objects are absolutely identical. */ bool vpCameraParameters::operator!=(const vpCameraParameters &c) const { return !(*this == c); } /*! - Compute angles and normals of the FOV. - - \param w : Width of the image - \param h : Height of the image. -*/ + * Compute angles and normals of the FOV. + * + * \param w : Width of the image + * \param h : Height of the image. 
+ */ void vpCameraParameters::computeFov(const unsigned int &w, const unsigned int &h) { - if ((!isFov || w != width || h != height) && w != 0 && h != 0) { - fovNormals = std::vector(4); + if ((!m_isFov || w != m_width || h != m_height) && w != 0 && h != 0) { + m_fovNormals = std::vector(4); - isFov = true; + m_isFov = true; - double hFovAngle = atan(((double)w - u0) * (1.0 / px)); - double vFovAngle = atan((v0) * (1.0 / py)); - double minushFovAngle = atan((u0) * (1.0 / px)); - double minusvFovAngle = atan(((double)h - v0) * (1.0 / py)); + double hFovAngle = atan(((double)w - m_u0) * (1.0 / m_px)); + double vFovAngle = atan((m_v0) * (1.0 / m_py)); + double minushFovAngle = atan((m_u0) * (1.0 / m_px)); + double minusvFovAngle = atan(((double)h - m_v0) * (1.0 / m_py)); - width = w; - height = h; + m_width = w; + m_height = h; vpColVector n(3); n = 0; @@ -504,10 +509,10 @@ void vpCameraParameters::computeFov(const unsigned int &w, const unsigned int &h vpColVector nLeft, nRight; nLeft = Rleft * (-n); - fovNormals[0] = nLeft.normalize(); + m_fovNormals[0] = nLeft.normalize(); nRight = Rright * n; - fovNormals[1] = nRight.normalize(); + m_fovNormals[1] = nRight.normalize(); n = 0; n[1] = 1.0; @@ -518,10 +523,10 @@ void vpCameraParameters::computeFov(const unsigned int &w, const unsigned int &h vpColVector nUp, nDown; nUp = Rup * (-n); - fovNormals[2] = nUp.normalize(); + m_fovNormals[2] = nUp.normalize(); nDown = Rdown * n; - fovNormals[3] = nDown.normalize(); + m_fovNormals[3] = nDown.normalize(); m_hFovAngle = hFovAngle + minushFovAngle; m_vFovAngle = vFovAngle + minusvFovAngle; @@ -529,72 +534,72 @@ void vpCameraParameters::computeFov(const unsigned int &w, const unsigned int &h } /*! 
- Return the camera matrix \f$K\f$ given by: - - \f$ K = \left[\begin{array}{ccc} - p_x & 0 & u_0 \\ - 0 & p_y & v_0 \\ - 0 & 0 & 1 - \end{array} \right] \f$ - - \sa get_K_inverse() -*/ + * Return the camera matrix \f$K\f$ given by: + * + * \f$ K = \left[\begin{array}{ccc} + * p_x & 0 & u_0 \\ + * 0 & p_y & v_0 \\ + * 0 & 0 & 1 + * \end{array} \right] \f$ + * + * \sa get_K_inverse() + */ vpMatrix vpCameraParameters::get_K() const { vpMatrix K(3, 3, 0.); - K[0][0] = px; - K[1][1] = py; - K[0][2] = u0; - K[1][2] = v0; + K[0][0] = m_px; + K[1][1] = m_py; + K[0][2] = m_u0; + K[1][2] = m_v0; K[2][2] = 1.0; return K; } /*! - Return the inverted camera matrix \f$K^{-1}\f$ given by: - - \f$ K^{-1} = \left[\begin{array}{ccc} - 1/p_x & 0 & -u_0/p_x \\ - 0 & 1/p_y & -v_0/p_y \\ - 0 & 0 & 1 - \end{array} \right] \f$ - - \sa get_K() -*/ + * Return the inverted camera matrix \f$K^{-1}\f$ given by: + * + * \f$ K^{-1} = \left[\begin{array}{ccc} + * 1/p_x & 0 & -u_0/p_x \\ + * 0 & 1/p_y & -v_0/p_y \\ + * 0 & 0 & 1 + * \end{array} \right] \f$ + * + * \sa get_K() + */ vpMatrix vpCameraParameters::get_K_inverse() const { vpMatrix K_inv(3, 3, 0.); - K_inv[0][0] = inv_px; - K_inv[1][1] = inv_py; - K_inv[0][2] = -u0 * inv_px; - K_inv[1][2] = -v0 * inv_py; + K_inv[0][0] = m_inv_px; + K_inv[1][1] = m_inv_py; + K_inv[0][2] = -m_u0 * m_inv_px; + K_inv[1][2] = -m_v0 * m_inv_py; K_inv[2][2] = 1.0; return K_inv; } /*! - Print the camera parameters on the standard output - - \sa operator<<(std::ostream &, const vpCameraParameters &) -*/ + * Print the camera parameters on the standard output. 
+ * + * \sa operator<<(std::ostream &, const vpCameraParameters &) + */ void vpCameraParameters::printParameters() { std::ios::fmtflags original_flags(std::cout.flags()); - switch (projModel) { + switch (m_projModel) { case vpCameraParameters::perspectiveProjWithoutDistortion: std::cout.precision(10); std::cout << "Camera parameters for perspective projection without distortion:" << std::endl; - std::cout << " px = " << px << "\t py = " << py << std::endl; - std::cout << " u0 = " << u0 << "\t v0 = " << v0 << std::endl; + std::cout << " px = " << m_px << "\t py = " << m_py << std::endl; + std::cout << " u0 = " << m_u0 << "\t v0 = " << m_v0 << std::endl; break; case vpCameraParameters::perspectiveProjWithDistortion: std::cout.precision(10); std::cout << "Camera parameters for perspective projection with distortion:" << std::endl; - std::cout << " px = " << px << "\t py = " << py << std::endl; - std::cout << " u0 = " << u0 << "\t v0 = " << v0 << std::endl; - std::cout << " kud = " << kud << std::endl; - std::cout << " kdu = " << kdu << std::endl; + std::cout << " px = " << m_px << "\t py = " << m_py << std::endl; + std::cout << " u0 = " << m_u0 << "\t v0 = " << m_v0 << std::endl; + std::cout << " kud = " << m_kud << std::endl; + std::cout << " kdu = " << m_kdu << std::endl; break; case vpCameraParameters::ProjWithKannalaBrandtDistortion: std::cout << " Coefficients: "; @@ -606,13 +611,13 @@ void vpCameraParameters::printParameters() // Restore ostream format std::cout.flags(original_flags); } -/*! - Print on the output stream \e os the camera parameters. - - \param os : Output stream. - \param cam : Camera parameters. -*/ +/*! + * Print on the output stream \e os the camera parameters. + * + * \param os : Output stream. + * \param cam : Camera parameters. 
+ */ VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpCameraParameters &cam) { switch (cam.get_projModel()) { diff --git a/modules/core/src/camera/vpMeterPixelConversion.cpp b/modules/core/src/camera/vpMeterPixelConversion.cpp index b8d452ad95..5c0f835b42 100644 --- a/modules/core/src/camera/vpMeterPixelConversion.cpp +++ b/modules/core/src/camera/vpMeterPixelConversion.cpp @@ -58,15 +58,15 @@ void vpMeterPixelConversion::convertLine(const vpCameraParameters &cam, const do { double co = cos(theta_m); double si = sin(theta_m); - double d = sqrt(vpMath::sqr(cam.py * co) + vpMath::sqr(cam.px * si)); + double d = sqrt(vpMath::sqr(cam.m_py * co) + vpMath::sqr(cam.m_px * si)); if (fabs(d) < 1e-6) { vpERROR_TRACE("division by zero"); throw(vpException(vpException::divideByZeroError, "division by zero")); } - theta_p = atan2(cam.px * si, cam.py * co); - rho_p = (cam.px * cam.py * rho_m + cam.u0 * cam.py * co + cam.v0 * cam.px * si); + theta_p = atan2(cam.m_px * si, cam.m_py * co); + rho_p = (cam.m_px * cam.m_py * rho_m + cam.m_u0 * cam.m_py * co + cam.m_v0 * cam.m_px * si); rho_p /= d; } @@ -82,7 +82,7 @@ void vpMeterPixelConversion::convertLine(const vpCameraParameters &cam, const do \param[in] circle : 3D circle with internal vector `circle.p[]` that contains the ellipse parameters expressed in the image plane. These parameters are internally updated after perspective projection of the sphere. \param[out] center_p : Center \f$(u_c, v_c)\f$ of the corresponding ellipse in the image with coordinates expressed in - pixels. + pixels. \param[out] n20_p, n11_p, n02_p : Second order centered moments of the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where \f$\mu_{ij}\f$ are the centered moments and a the area) expressed in pixels. 
@@ -125,7 +125,7 @@ void vpMeterPixelConversion::convertEllipse(const vpCameraParameters &cam, const \param[in] sphere : 3D sphere with internal vector `circle.p[]` that contains the ellipse parameters expressed in the image plane. These parameters are internally updated after perspective projection of the sphere. \param[out] center_p : Center \f$(u_c, v_c)\f$ of the corresponding ellipse in the image with coordinates expressed in - pixels. + pixels. \param[out] n20_p, n11_p, n02_p : Second order centered moments of the ellipse normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where \f$\mu_{ij}\f$ are the centered moments and a the area) expressed in pixels. diff --git a/modules/core/src/camera/vpPixelMeterConversion.cpp b/modules/core/src/camera/vpPixelMeterConversion.cpp index adc130124a..a55ce1c4a0 100644 --- a/modules/core/src/camera/vpPixelMeterConversion.cpp +++ b/modules/core/src/camera/vpPixelMeterConversion.cpp @@ -80,14 +80,14 @@ void vpPixelMeterConversion::convertLine(const vpCameraParameters &cam, const do { double co = cos(theta_p); double si = sin(theta_p); - double d = vpMath::sqr(cam.px * co) + vpMath::sqr(cam.py * si); + double d = vpMath::sqr(cam.m_px * co) + vpMath::sqr(cam.m_py * si); if (fabs(d) < 1e-6) { vpERROR_TRACE("division by zero"); throw(vpException(vpException::divideByZeroError, "division by zero")); } - theta_m = atan2(si * cam.py, co * cam.px); - rho_m = (rho_p - cam.u0 * co - cam.v0 * si) / sqrt(d); + theta_m = atan2(si * cam.m_py, co * cam.m_px); + rho_m = (rho_p - cam.m_u0 * co - cam.m_v0 * si) / sqrt(d); } /*! 
@@ -131,8 +131,8 @@ void vpPixelMeterConversion::convertMoment(const vpCameraParameters &cam, unsign const vpMatrix &moment_pixel, vpMatrix &moment_meter) { vpMatrix m(order, order); - double yc = -cam.v0; - double xc = -cam.u0; + double yc = -cam.m_v0; + double xc = -cam.m_u0; for (unsigned int k = 0; k < order; k++) // iteration correspondant e l'ordre du moment { @@ -145,7 +145,7 @@ void vpPixelMeterConversion::convertMoment(const vpCameraParameters &cam, unsign for (unsigned int t = 0; t <= q; t++) // somme interne { m[p][q] += static_cast(vpMath::comb(p, r)) * static_cast(vpMath::comb(q, t)) * - pow(xc, (int)(p - r)) * pow(yc, (int)(q - t)) * moment_pixel[r][t]; + pow(xc, (int)(p - r)) * pow(yc, (int)(q - t)) * moment_pixel[r][t]; } } } @@ -154,7 +154,7 @@ void vpPixelMeterConversion::convertMoment(const vpCameraParameters &cam, unsign for (unsigned int p = 0; p < order; p++) for (unsigned int q = 0; q < order; q++) if (p + q == k) { - m[p][q] *= pow(cam.inv_px, (int)(1 + p)) * pow(cam.inv_py, (int)(1 + q)); + m[p][q] *= pow(cam.m_inv_px, (int)(1 + p)) * pow(cam.m_inv_py, (int)(1 + q)); } for (unsigned int k = 0; k < order; k++) // iteration correspondant e l'ordre du moment @@ -254,7 +254,7 @@ void vpPixelMeterConversion::convertMoment(const cv::Mat &cameraMatrix, unsigned for (unsigned int t = 0; t <= q; t++) // somme interne { m[p][q] += static_cast(vpMath::comb(p, r)) * static_cast(vpMath::comb(q, t)) * - pow(xc, static_cast(p - r)) * pow(yc, static_cast(q - t)) * moment_pixel[r][t]; + pow(xc, static_cast(p - r)) * pow(yc, static_cast(q - t)) * moment_pixel[r][t]; } } } diff --git a/modules/core/src/image/vpRGBa.cpp b/modules/core/src/image/vpRGBa.cpp index 85c60fb65c..69147272c4 100644 --- a/modules/core/src/image/vpRGBa.cpp +++ b/modules/core/src/image/vpRGBa.cpp @@ -91,7 +91,7 @@ vpRGBa &vpRGBa::operator=(const vpRGBa &&v) relation with respectively R, G, B and A. \exception vpException::dimensionError : If v is not a 4 four - dimention vector. 
+ dimension vector. */ vpRGBa &vpRGBa::operator=(const vpColVector &v) { diff --git a/modules/core/src/image/vpRGBf.cpp b/modules/core/src/image/vpRGBf.cpp index dbb0d50afd..7822445047 100644 --- a/modules/core/src/image/vpRGBf.cpp +++ b/modules/core/src/image/vpRGBf.cpp @@ -88,7 +88,7 @@ vpRGBf &vpRGBf::operator=(const vpRGBf &&v) \param v : Input vector. v[0], v[1], v[2] are to make into relation with respectively R, G and B. - \exception vpException::dimensionError : If v is not a 3-dimentional vector. + \exception vpException::dimensionError : If v is not a 3-dimensional vector. */ vpRGBf &vpRGBf::operator=(const vpColVector &v) { diff --git a/modules/core/src/math/kalman/vpLinearKalmanFilterInstantiation.cpp b/modules/core/src/math/kalman/vpLinearKalmanFilterInstantiation.cpp index 9186d0a69c..ad4179240d 100644 --- a/modules/core/src/math/kalman/vpLinearKalmanFilterInstantiation.cpp +++ b/modules/core/src/math/kalman/vpLinearKalmanFilterInstantiation.cpp @@ -152,7 +152,7 @@ int main() // Does the filtering vpColVector vm(2); // Measured velocities for ( ; ; ) { - // Get the two dimentional velocity measures + // Get the two dimensional velocity measures // vm[0] = ...; // vm[1] = ...; @@ -187,7 +187,7 @@ void vpLinearKalmanFilterInstantiation::initFilter(unsigned int n_signal, vpColV } /*! - Modelisation of a constant speed state model with white noise. The + Modelization of a constant speed state model with white noise. The measure is assumed to be the position of the target. The considered state model is the following @@ -336,7 +336,7 @@ void vpLinearKalmanFilterInstantiation::initStateConstVel_MeasurePos(unsigned in /*! - Modelisation of a constant speed state model with colored noise. The + Modelization of a constant speed state model with colored noise. The measure is assumed to be the velocity of the target. 
This state model assume that there is some memory associated with @@ -472,7 +472,7 @@ int main() // Does the filtering vpColVector vm(2); // Measured velocities for ( ; ; ) { - // Get the two dimentional velocity measures + // Get the two dimensional velocity measures // vm[0] = ...; // vm[1] = ...; @@ -544,7 +544,7 @@ void vpLinearKalmanFilterInstantiation::initStateConstVelWithColoredNoise_Measur /*! - Modelisation of a constant acceleration state model with colored noise. The + Modelization of a constant acceleration state model with colored noise. The measure is assumed to be the velocity of the target. This state model assume that there is some memory associated with @@ -691,7 +691,7 @@ int main() // Does the filtering vpColVector vm(2); // Measured velocities for ( ; ; ) { - // Get the two dimentional velocity measures + // Get the two dimensional velocity measures // vm[0] = ...; // vm[1] = ...; diff --git a/modules/core/src/math/matrix/vpColVector.cpp b/modules/core/src/math/matrix/vpColVector.cpp index 7a485eba4a..01fac52a25 100644 --- a/modules/core/src/math/matrix/vpColVector.cpp +++ b/modules/core/src/math/matrix/vpColVector.cpp @@ -658,7 +658,7 @@ vpMatrix vpColVector::skew(const vpColVector &v) { vpMatrix M; if (v.getRows() != 3) { - throw(vpException(vpException::dimensionError, "Cannot compute skew vector of a non 3-dimention vector (%d)", + throw(vpException(vpException::dimensionError, "Cannot compute skew vector of a non 3-dimension vector (%d)", v.getRows())); } diff --git a/modules/core/src/math/matrix/vpSubColVector.cpp b/modules/core/src/math/matrix/vpSubColVector.cpp index 1602c74441..4d5875081e 100644 --- a/modules/core/src/math/matrix/vpSubColVector.cpp +++ b/modules/core/src/math/matrix/vpSubColVector.cpp @@ -42,32 +42,32 @@ #include //! Default constructor that creates an empty vector. 
-vpSubColVector::vpSubColVector() : vpColVector(), pRowNum(0), parent(NULL) {} +vpSubColVector::vpSubColVector() : vpColVector(), m_pRowNum(0), m_parent(NULL) { } /*! - Construct a sub-column vector from a parent column vector. - \param v : parent column vector. - \param offset : offset where the sub-column vector starts in the parent - column vector. \param nrows : size of the sub-column vector. -*/ + * Construct a sub-column vector from a parent column vector. + * \param v : parent column vector. + * \param offset : offset where the sub-column vector starts in the parent column vector. + * \param nrows : size of the sub-column vector. + */ vpSubColVector::vpSubColVector(vpColVector &v, const unsigned int &offset, const unsigned int &nrows) - : vpColVector(), pRowNum(0), parent(NULL) + : vpColVector(), m_pRowNum(0), m_parent(NULL) { init(v, offset, nrows); } /*! - Initialize a sub-column vector from a parent column vector. - \param v : parent column vector. - \param offset : offset where the sub-column vector starts in the parent - column vector. \param nrows : size of the sub-column vector. -*/ + * Initialize a sub-column vector from a parent column vector. + * \param v : parent column vector. + * \param offset : offset where the sub-column vector starts in the parent column vector. + * \param nrows : size of the sub-column vector. 
+ */ void vpSubColVector::init(vpColVector &v, const unsigned int &offset, const unsigned int &nrows) { if (!v.data) { throw(vpException(vpException::fatalError, "Cannot initialize a " - "sub-column vector from an " - "empty parent column vector")); + "sub-column vector from an " + "empty parent column vector")); } if (offset + nrows <= v.getRows()) { @@ -76,49 +76,51 @@ void vpSubColVector::init(vpColVector &v, const unsigned int &offset, const unsi rowNum = nrows; colNum = 1; - pRowNum = v.getRows(); - parent = &v; + m_pRowNum = v.getRows(); + m_parent = &v; if (rowPtrs) { free(rowPtrs); } - rowPtrs = (double **)malloc(parent->getRows() * sizeof(double *)); + rowPtrs = (double **)malloc(m_parent->getRows() * sizeof(double *)); for (unsigned int i = 0; i < nrows; i++) rowPtrs[i] = v.data + i + offset; dsize = rowNum; - } else { + } + else { throw(vpException(vpException::dimensionError, "Cannot create a sub-column vector that is not " - "completely contained in the parrent column vector")); + "completely contained in the parent column vector")); } } -//! Destructor that set the pointer to the parrent column vector to NULL. +/*! + * Destructor that set the pointer to the parent column vector to NULL. + */ vpSubColVector::~vpSubColVector() { data = NULL; } /*! - This method can be used to detect if the parent column vector - always exits or its size have not changed. - If this not the case an exception is thrown. -*/ + * This method can be used to detect if the parent column vector + * always exits or its size have not changed. + * If this not the case an exception is thrown. + */ void vpSubColVector::checkParentStatus() const { if (!data) { throw(vpException(vpException::fatalError, "The parent of the current sub-column vector has been destroyed")); } - if (pRowNum != parent->getRows()) { + if (m_pRowNum != m_parent->getRows()) { throw(vpException(vpException::dimensionError, "The size of the parent sub-column vector has changed")); } } /*! 
- Allow to initialize a sub-column vector from an other one using operation A - = B. Notice that the sub-column vector is not resized to the dimension of \e - B. - - \param B : a sub-column vector. -*/ + * Allow to initialize a sub-column vector from an other one using operation A + * = B. Notice that the sub-column vector is not resized to the dimension of \e B. + * + * \param B : a sub-column vector. + */ vpSubColVector &vpSubColVector::operator=(const vpSubColVector &B) { if (rowNum != B.getRows()) { @@ -127,17 +129,18 @@ vpSubColVector &vpSubColVector::operator=(const vpSubColVector &B) "(%dx1) sub-column vector", rowNum, B.getRows())); } - pRowNum = B.pRowNum; + m_pRowNum = B.m_pRowNum; for (unsigned int i = 0; i < rowNum; i++) data[i] = B[i]; return *this; } /*! - Allow to initialize a sub-column vector from a column vector using operation - A = B. Notice that the sub-column vector is not resized to the dimension of - \e B. \param B : a column vector. -*/ + * Allow to initialize a sub-column vector from a column vector using operation + * A = B. Notice that the sub-column vector is not resized to the dimension of \e B. + * + * \param B : a column vector. + */ vpSubColVector &vpSubColVector::operator=(const vpColVector &B) { if (rowNum != B.getRows()) { @@ -154,10 +157,11 @@ vpSubColVector &vpSubColVector::operator=(const vpColVector &B) } /*! - Allow to initialize a sub-column vector from a m-by-1 matrix using operation - A = B. Notice that the sub-column vector is not resized to the dimension of - \e B. \param B : a matrix of size m-by-1. -*/ + * Allow to initialize a sub-column vector from a m-by-1 matrix using operation + * A = B. Notice that the sub-column vector is not resized to the dimension of \e B. + * + * \param B : a matrix of size m-by-1. + */ vpSubColVector &vpSubColVector::operator=(const vpMatrix &B) { if ((B.getCols() != 1) || (rowNum != B.getRows())) { @@ -171,9 +175,10 @@ vpSubColVector &vpSubColVector::operator=(const vpMatrix &B) } /*! 
- Set all the elements of the sub-column vector to \e x. - \param x : a scalar value. -*/ + * Set all the elements of the sub-column vector to \e x. + * + * \param x : a scalar value. + */ vpSubColVector &vpSubColVector::operator=(const double &x) { for (unsigned int i = 0; i < rowNum; i++) @@ -182,7 +187,7 @@ vpSubColVector &vpSubColVector::operator=(const double &x) } /*! - Operator that allows to convert a translation vector into a column vector. + * Operator that allows to convert a translation vector into a column vector. */ vpSubColVector &vpSubColVector::operator=(const vpTranslationVector &tv) { @@ -190,7 +195,8 @@ vpSubColVector &vpSubColVector::operator=(const vpTranslationVector &tv) if (rowNum != k) { try { resize(k); - } catch (...) { + } + catch (...) { throw; } } @@ -198,8 +204,9 @@ vpSubColVector &vpSubColVector::operator=(const vpTranslationVector &tv) memcpy(data, tv.data, rowNum * sizeof(double)); return *this; } + /*! - Operator that allows to convert a rotation vector into a column vector. + * Operator that allows to convert a rotation vector into a column vector. */ vpSubColVector &vpSubColVector::operator=(const vpRotationVector &rv) { @@ -207,7 +214,8 @@ vpSubColVector &vpSubColVector::operator=(const vpRotationVector &rv) if (rowNum != k) { try { resize(k); - } catch (...) { + } + catch (...) { throw; } } @@ -215,8 +223,9 @@ vpSubColVector &vpSubColVector::operator=(const vpRotationVector &rv) memcpy(data, rv.data, rowNum * sizeof(double)); return *this; } + /*! - Operator that allows to convert a pose vector into a column vector. + * Operator that allows to convert a pose vector into a column vector. */ vpSubColVector &vpSubColVector::operator=(const vpPoseVector &p) { @@ -224,7 +233,8 @@ vpSubColVector &vpSubColVector::operator=(const vpPoseVector &p) if (rowNum != k) { try { resize(k); - } catch (...) { + } + catch (...) 
{ throw; } } diff --git a/modules/core/src/math/matrix/vpSubRowVector.cpp b/modules/core/src/math/matrix/vpSubRowVector.cpp index bb0b06f19d..6e64b314e0 100644 --- a/modules/core/src/math/matrix/vpSubRowVector.cpp +++ b/modules/core/src/math/matrix/vpSubRowVector.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Mask on a vpRowVector . - * - * Authors: - * Laneurit Jean - * -*****************************************************************************/ + */ #include @@ -42,32 +37,31 @@ #include //! Default constructor that creates an empty vector. -vpSubRowVector::vpSubRowVector() : vpRowVector(), pColNum(0), parent(NULL) {} +vpSubRowVector::vpSubRowVector() : vpRowVector(), m_pColNum(0), m_parent(NULL) { } /*! - Construct a sub-row vector from a parent row vector. - \param v : parent row vector. - \param offset : offset where the sub-row vector starts in the parent row - vector. \param ncols : size of the sub-row vector. -*/ + * Construct a sub-row vector from a parent row vector. + * \param v : parent row vector. + * \param offset : offset where the sub-row vector starts in the parent row vector. + * \param ncols : size of the sub-row vector. + */ vpSubRowVector::vpSubRowVector(vpRowVector &v, const unsigned int &offset, const unsigned int &ncols) - : vpRowVector(), pColNum(0), parent(NULL) + : vpRowVector(), m_pColNum(0), m_parent(NULL) { init(v, offset, ncols); } /*! - Initialize a sub-row vector from a parent row vector. - \param v : parent row vector. - \param offset : offset where the sub-row vector starts in the parent row - vector. \param ncols : size of the sub-row vector. -*/ + * Initialize a sub-row vector from a parent row vector. + * \param v : parent row vector. 
+ * \param offset : offset where the sub-row vector starts in the parent row vector. + * \param ncols : size of the sub-row vector. + */ void vpSubRowVector::init(vpRowVector &v, const unsigned int &offset, const unsigned int &ncols) { if (!v.data) { throw(vpException(vpException::fatalError, "Cannot initialize a sub-row " - "vector from an empty parent " - "row vector")); + "vector from an empty parent row vector")); } if (offset + ncols <= v.getCols()) { @@ -76,8 +70,8 @@ void vpSubRowVector::init(vpRowVector &v, const unsigned int &offset, const unsi rowNum = 1; colNum = ncols; - pColNum = v.getCols(); - parent = &v; + m_pColNum = v.getCols(); + m_parent = &v; if (rowPtrs) free(rowPtrs); @@ -87,44 +81,47 @@ void vpSubRowVector::init(vpRowVector &v, const unsigned int &offset, const unsi rowPtrs[i] = v.data + i + offset; dsize = colNum; - } else { + } + else { throw(vpException(vpException::dimensionError, "Cannot create a sub-row vector that is not completely " - "contained in the parrent row vector")); + "contained in the parent row vector")); } } -//! Destructor that set the pointer to the parrent row vector to NULL. +/*! + * Destructor that set the pointer to the parent row vector to NULL. + */ vpSubRowVector::~vpSubRowVector() { data = NULL; } /*! - This method can be used to detect if the parent row vector - always exits or its size have not changed. - If this not the case an exception is thrown. -*/ + * This method can be used to detect if the parent row vector + * always exits or its size have not changed. + * If this not the case an exception is thrown. + */ void vpSubRowVector::checkParentStatus() const { if (!data) { throw(vpException(vpException::fatalError, "The parent of the current sub-row vector has been destroyed")); } - if (pColNum != parent->getCols()) { + if (m_pColNum != m_parent->getCols()) { throw(vpException(vpException::dimensionError, "The size of the parent sub-row vector has changed")); } } /*! 
- Allow to initialize a sub-row vector from an other one using operation A = - B. Notice that the sub-row vector is not resized to the dimension of \e B. - - \param B : a sub-row vector. -*/ + * Allow to initialize a sub-row vector from an other one using operation A = + * B. Notice that the sub-row vector is not resized to the dimension of \e B. + * + * \param B : a sub-row vector. + */ vpSubRowVector &vpSubRowVector::operator=(const vpSubRowVector &B) { if (colNum != B.getCols()) { throw(vpException(vpException::dimensionError, "Cannot initialize (1x%d) sub-row vector from (1x%d) sub-row vector", colNum, B.getCols())); } - pColNum = B.pColNum; - parent = B.parent; + m_pColNum = B.m_pColNum; + m_parent = B.m_parent; for (unsigned int i = 0; i < rowNum; i++) data[i] = B[i]; @@ -132,11 +129,11 @@ vpSubRowVector &vpSubRowVector::operator=(const vpSubRowVector &B) } /*! - Allow to initialize a sub-row vector from a row vector using operation A = - B. Notice that the sub-row vector is not resized to the dimension of \e B. - - \param B : a row vector. -*/ + * Allow to initialize a sub-row vector from a row vector using operation A = + * B. Notice that the sub-row vector is not resized to the dimension of \e B. + * + * \param B : a row vector. + */ vpSubRowVector &vpSubRowVector::operator=(const vpRowVector &B) { if (colNum != B.getCols()) { @@ -151,11 +148,11 @@ vpSubRowVector &vpSubRowVector::operator=(const vpRowVector &B) } /*! - Allow to initialize a sub-row vector from a matrix using operation A = B. - Notice that the sub-row vector is not resized to the dimension of \e B. - - \param B : a matrix of size 1-by-n. -*/ + * Allow to initialize a sub-row vector from a matrix using operation A = B. + * Notice that the sub-row vector is not resized to the dimension of \e B. + * + * \param B : a matrix of size 1-by-n. 
+ */ vpSubRowVector &vpSubRowVector::operator=(const vpMatrix &B) { if ((B.getRows() != 1) || (colNum != B.getCols())) { @@ -167,10 +164,11 @@ vpSubRowVector &vpSubRowVector::operator=(const vpMatrix &B) data[i] = B[i][1]; return *this; } + /*! - Set all the elements of the sub-row vector to \e x. - \param x : a scalar value. -*/ + * Set all the elements of the sub-row vector to \e x. + * \param x : a scalar value. + */ vpSubRowVector &vpSubRowVector::operator=(const double &x) { for (unsigned int i = 0; i < rowNum; i++) diff --git a/modules/core/src/tools/network/vpClient.cpp b/modules/core/src/tools/network/vpClient.cpp index a8864d6140..55561dca5b 100644 --- a/modules/core/src/tools/network/vpClient.cpp +++ b/modules/core/src/tools/network/vpClient.cpp @@ -41,7 +41,7 @@ // inet_ntop() not supported on win XP #ifdef VISP_HAVE_FUNC_INET_NTOP -vpClient::vpClient() : vpNetwork(), numberOfAttempts(0) {} +vpClient::vpClient() : vpNetwork(), m_numberOfAttempts(0) { } /*! Disconnect the client from all the servers, and close the sockets. @@ -94,18 +94,18 @@ bool vpClient::connectToHostname(const std::string &hostname, const unsigned int serv.receptorIP = inet_ntoa(*(in_addr *)server->h_addr); return connectServer(serv); -} + } -/*! - Connect to the server represented by the given ip, and at a given port. + /*! + Connect to the server represented by the given ip, and at a given port. - \sa vpClient::connectToHostname() + \sa vpClient::connectToHostname() - \param ip : IP of the server. - \param port_serv : Port used for the connection. + \param ip : IP of the server. + \param port_serv : Port used for the connection. - \return True if the connection has been etablished, false otherwise. -*/ + \return True if the connection has been etablished, false otherwise. 
+ */ bool vpClient::connectToIP(const std::string &ip, const unsigned int &port_serv) { vpNetwork::vpReceptor serv; @@ -127,13 +127,13 @@ bool vpClient::connectToIP(const std::string &ip, const unsigned int &port_serv) serv.receptorAddress.sin_port = htons((unsigned short)port_serv); return connectServer(serv); -} + } -/*! - Deconnect from the server at a specific index. + /*! + Deconnect from the server at a specific index. - \param index : Index of the server. -*/ + \param index : Index of the server. + */ void vpClient::deconnect(const unsigned int &index) { if (index < receptor_list.size()) { @@ -172,15 +172,15 @@ bool vpClient::connectServer(vpNetwork::vpReceptor &serv) { serv.receptorAddressSize = sizeof(serv.receptorAddress); - numberOfAttempts = 15; + m_numberOfAttempts = 15; unsigned int ind = 1; int connectionResult = -1; - while (ind <= numberOfAttempts) { + while (ind <= m_numberOfAttempts) { std::cout << "Attempt number " << ind << "..." << std::endl; connectionResult = - connect(serv.socketFileDescriptorReceptor, (sockaddr *)&serv.receptorAddress, serv.receptorAddressSize); + connect(serv.socketFileDescriptorReceptor, (sockaddr *)&serv.receptorAddress, serv.receptorAddressSize); if (connectionResult >= 0) break; @@ -204,7 +204,8 @@ bool vpClient::connectServer(vpNetwork::vpReceptor &serv) if (serv.socketFileDescriptorReceptor > 0) { int set_option = 1; if (0 == setsockopt(serv.socketFileDescriptorReceptor, SOL_SOCKET, SO_NOSIGPIPE, &set_option, sizeof(set_option))) { - } else { + } + else { std::cout << "Failed to set socket signal option" << std::endl; } } @@ -216,5 +217,5 @@ bool vpClient::connectServer(vpNetwork::vpReceptor &serv) #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpClient.cpp.o) has no symbols -void dummy_vpClient(){}; +void dummy_vpClient() { }; #endif diff --git a/modules/core/src/tracking/forward-projection/vpCylinder.cpp b/modules/core/src/tracking/forward-projection/vpCylinder.cpp index 
98a4596368..65b83613f7 100644 --- a/modules/core/src/tracking/forward-projection/vpCylinder.cpp +++ b/modules/core/src/tracking/forward-projection/vpCylinder.cpp @@ -120,11 +120,6 @@ vpCylinder::vpCylinder(double oA, double oB, double oC, double oX, double oY, do setWorldCoordinates(oA, oB, oC, oX, oY, oZ, R); } -/*! - Default constructor. - */ -vpCylinder::~vpCylinder() {} - /*! Perspective projection of the cylinder. diff --git a/modules/core/src/tracking/forward-projection/vpSphere.cpp b/modules/core/src/tracking/forward-projection/vpSphere.cpp index e4ccff3622..d915fe0638 100644 --- a/modules/core/src/tracking/forward-projection/vpSphere.cpp +++ b/modules/core/src/tracking/forward-projection/vpSphere.cpp @@ -109,11 +109,6 @@ vpSphere::vpSphere(double oX, double oY, double oZ, double R) setWorldCoordinates(oX, oY, oZ, R); } -/*! - * Destructor that does nothing. - */ -vpSphere::~vpSphere() {} - /*! * Perspective projection of the sphere. * This method updates internal parameters (cP and p). 
@@ -172,12 +167,14 @@ void vpSphere::projection(const vpColVector &cP_, vpColVector &p_) const E = e; A = a; B = b; - } else { + } + else { E = -1.0 / e; A = b; B = a; } - } else { + } + else { E = 0.0; A = r / sqrt(s); B = r * sqrt(y0 * y0 + z0 * z0 - r * r) / s; diff --git a/modules/core/src/tracking/moments/vpMomentCommon.cpp b/modules/core/src/tracking/moments/vpMomentCommon.cpp index 06cde95f3c..93082bcd30 100644 --- a/modules/core/src/tracking/moments/vpMomentCommon.cpp +++ b/modules/core/src/tracking/moments/vpMomentCommon.cpp @@ -54,7 +54,7 @@ vpMomentCommon::vpMomentCommon(double dstSurface, const std::vector &ref, double refAlpha, double dstZ, bool flg_sxsyfromnormalized) : vpMomentDatabase(), momentBasic(), momentGravity(), momentCentered(), momentGravityNormalized(), - momentSurfaceNormalized(dstSurface, dstZ), momentCInvariant(), momentAlpha(ref, refAlpha), momentArea() + momentSurfaceNormalized(dstSurface, dstZ), momentCInvariant(), momentAlpha(ref, refAlpha), momentArea() { momentCInvariant = new vpMomentCInvariant(flg_sxsyfromnormalized); @@ -69,59 +69,58 @@ vpMomentCommon::vpMomentCommon(double dstSurface, const std::vector &ref } /*! -Updates all moments in the database with the object and computes all their -values. This is possible because this particular database knows the link -between the moments it contains. The order of computation is as follows: -vpMomentGravityCenter,vpMomentCentered,vpMomentAlpha,vpMomentCInvariant,vpMomentSInvariant,vpMomentAreaNormalized,vpMomentGravityCenterNormalized -\param object : Moment object. 
- -Example of using a preconfigured database to compute one of the C-invariants: -\code -#include -#include -#include -#include -#include - -int main() -{ - // Define two discrete points - vpPoint p; - std::vector vec_p; // std::vector that contains the vertices of the contour polygon - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(-3); - p.set_y(0); // coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(-3); - p.set_y(1); // coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - - vpMomentObject obj(5); // Object initialized up to order 5 to handle - // all computations required by vpMomentCInvariant - obj.setType(vpMomentObject::DENSE_POLYGON); // object is the inner part of a polygon - obj.fromstd::vector(vec_p); // Init the discrete object with two points - - //initialisation with default values - vpMomentCommon db(vpMomentCommon::getSurface(obj),vpMomentCommon::getMu3(obj),vpMomentCommon::getAlpha(obj),1.); - bool success; - - db.updateAll(obj); // Update AND compute all moments - - //get C-invariant - vpMomentCInvariant& C = static_cast(db.get("vpMomentCInvariant",success)); - if(success) { - std::cout << C.get(0) << std:: std::endl; - } else - std::cout << "vpMomentCInvariant not found." << std::endl; - - return 0; -} - -\endcode + Updates all moments in the database with the object and computes all their + values. This is possible because this particular database knows the link + between the moments it contains. The order of computation is as follows: + vpMomentGravityCenter,vpMomentCentered,vpMomentAlpha,vpMomentCInvariant,vpMomentSInvariant,vpMomentAreaNormalized,vpMomentGravityCenterNormalized + \param object : Moment object. 
+ + Example of using a preconfigured database to compute one of the C-invariants: + \code + #include + #include + #include + #include + #include + + int main() + { + // Define two discrete points + vpPoint p; + std::vector vec_p; // std::vector that contains the vertices of the contour polygon + + p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) + vec_p.push_back(p); + p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) + vec_p.push_back(p); + p.set_x(-3); + p.set_y(0); // coordinates in meters in the image plane (vertex 3) + vec_p.push_back(p); + p.set_x(-3); + p.set_y(1); // coordinates in meters in the image plane (vertex 4) + vec_p.push_back(p); + + vpMomentObject obj(5); // Object initialized up to order 5 to handle + // all computations required by vpMomentCInvariant + obj.setType(vpMomentObject::DENSE_POLYGON); // object is the inner part of a polygon + obj.fromstd::vector(vec_p); // Init the discrete object with two points + + //initialisation with default values + vpMomentCommon db(vpMomentCommon::getSurface(obj),vpMomentCommon::getMu3(obj),vpMomentCommon::getAlpha(obj),1.); + bool success; + + db.updateAll(obj); // Update AND compute all moments + + //get C-invariant + vpMomentCInvariant& C = static_cast(db.get("vpMomentCInvariant",success)); + if(success) { + std::cout << C.get(0) << std:: std::endl; + } else + std::cout << "vpMomentCInvariant not found." << std::endl; + + return 0; + } + \endcode */ void vpMomentCommon::updateAll(vpMomentObject &object) { @@ -137,7 +136,8 @@ void vpMomentCommon::updateAll(vpMomentObject &object) momentGravityNormalized.compute(); momentArea.compute(); - } catch (const char *ex) { + } + catch (const char *ex) { std::cout << "exception:" << ex << std::endl; } } @@ -170,8 +170,8 @@ double vpMomentCommon::getSurface(vpMomentObject &object) } /*! -Gets a reference alpha of an object. -\param object : Moment object. + Gets a reference alpha of an object. 
+ \param object : Moment object. */ double vpMomentCommon::getAlpha(vpMomentObject &object) { @@ -193,8 +193,8 @@ double vpMomentCommon::getAlpha(vpMomentObject &object) } /*! -Gets the reference 3rd order moments of an object. -\param object : Moment object. + Gets the reference 3rd order moments of an object. + \param object : Moment object. */ std::vector vpMomentCommon::getMu3(vpMomentObject &object) { diff --git a/modules/core/test/tools/xml/testXmlParser.cpp b/modules/core/test/tools/xml/testXmlParser.cpp index 1fc6af3d7f..5dbcecbc5f 100644 --- a/modules/core/test/tools/xml/testXmlParser.cpp +++ b/modules/core/test/tools/xml/testXmlParser.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,26 +29,24 @@ * * Description: * Example which describes how to use the xml parser class. - * -*****************************************************************************/ + */ /*! - \example testXmlParser.cpp - - XML parser example. - - This example contains the declaration of a class used to read and write data - in a xml file like: - \code - - 5.5 - 7 - 3 - Object - - \endcode - -*/ + * \example testXmlParser.cpp + * + * XML parser example. + * + * This example contains the declaration of a class used to read and write data + * in a xml file like: + * \code + * + * 5.5 + * 7 + * 3 + * Object + * + * \endcode + */ #include @@ -65,17 +62,11 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS -/* -------------------------------------------------------------------------- +/*! + * \class vpExampleDataParser + * \brief Class example used to show how to implement a xml parser based on the + * vpXmlParser */ - /* CLASS EXAMPLE */ - /* -------------------------------------------------------------------------- - */ - - /*! 
- \class vpExampleDataParser - \brief Class example used to show how to implement a xml parser based on the - vpXmlParser - */ class vpExampleDataParser : public vpXmlParser { protected: @@ -88,7 +79,6 @@ class vpExampleDataParser : public vpXmlParser public: vpExampleDataParser(); - virtual ~vpExampleDataParser(); // Data accessors. double getRange() const { return m_range; } @@ -107,11 +97,10 @@ class vpExampleDataParser : public vpXmlParser }; /*! - Constructor. - Initialise the map according to the data to parse, and initialise data to - default values. - -*/ + * Constructor. + * Initialise the map according to the data to parse, and initialise data to + * default values. + */ vpExampleDataParser::vpExampleDataParser() : m_range(0.), m_step(0), m_size_filter(0), m_name("") { nodeMap["config"] = config; @@ -122,19 +111,13 @@ vpExampleDataParser::vpExampleDataParser() : m_range(0.), m_step(0), m_size_filt } /*! - Destructor. - -*/ -vpExampleDataParser::~vpExampleDataParser() { } - -/*! - Read the main class. This method corresponds to the parsing of the main - document (which contains the whole data in the class). At this point, the - document exists and is open. - - \param doc : Pointer to the document to parse. - \param node : Pointer to the root node of the document. -*/ + * Read the main class. This method corresponds to the parsing of the main + * document (which contains the whole data in the class). At this point, the + * document exists and is open. + * + * \param doc : Pointer to the document to parse. + * \param node : Pointer to the root node of the document. + */ void vpExampleDataParser::readMainClass(xmlDocPtr doc, xmlNodePtr node) { for (xmlNodePtr dataNode = node->xmlChildrenNode; dataNode != NULL; dataNode = dataNode->next) { @@ -164,12 +147,12 @@ void vpExampleDataParser::readMainClass(xmlDocPtr doc, xmlNodePtr node) } /*! - Write the data in the file. - The file has already been opened or created in the save() method. 
And the - root node (corresponding to the main tag) has already been writen. - - \param node : Pointer to the root node. -*/ + * Write the data in the file. + * The file has already been opened or created in the save() method. And the + * root node (corresponding to the main tag) has already been written. + * + * \param node : Pointer to the root node. + */ void vpExampleDataParser::writeMainClass(xmlNodePtr node) { xmlWriteDoubleChild(node, (const char *)"range", m_range); @@ -180,11 +163,9 @@ void vpExampleDataParser::writeMainClass(xmlNodePtr node) #endif // doxygen -/* -------------------------------------------------------------------------- - */ - /* COMMAND LINE OPTIONS */ - /* -------------------------------------------------------------------------- - */ +/* -------------------------------------------------------------------------- */ +/* COMMAND LINE OPTIONS */ +/* -------------------------------------------------------------------------- */ // List of allowed command line options #define GETOPTARGS "cdo:h" @@ -193,14 +174,12 @@ void usage(const char *name, const char *badparam, const std::string &opath, con bool getOptions(int argc, const char **argv, std::string &opath, const std::string &user); /*! - -Print the program options. - -\param name : Program name. -\param badparam : Bad parameter name. -\param opath : Output image path. -\param user : Username. - + * Print the program options. + * + * \param name : Program name. + * \param badparam : Bad parameter name. + * \param opath : Output image path. + * \param user : Username. */ void usage(const char *name, const char *badparam, const std::string &opath, const std::string &user) { @@ -230,14 +209,14 @@ OPTIONS: Default\n\ } /*! - Set the program options. - - \param argc : Command line number of parameters. - \param argv : Array of command line parameters. - \param opath : Output data path. - \param user : Username. - \return false if the program has to be stopped, true otherwise. 
-*/ + * Set the program options. + * + * \param argc : Command line number of parameters. + * \param argv : Array of command line parameters. + * \param opath : Output data path. + * \param user : Username. + * \return false if the program has to be stopped, true otherwise. + */ bool getOptions(int argc, const char **argv, std::string &opath, const std::string &user) { const char *optarg_; @@ -275,11 +254,9 @@ bool getOptions(int argc, const char **argv, std::string &opath, const std::stri return true; } -/* -------------------------------------------------------------------------- - */ - /* MAIN FUNCTION */ - /* -------------------------------------------------------------------------- - */ +/* -------------------------------------------------------------------------- */ +/* MAIN FUNCTION */ +/* -------------------------------------------------------------------------- */ int main(int argc, const char **argv) { diff --git a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h index 33d5748b08..0bb54ee988 100644 --- a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h +++ b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h @@ -47,176 +47,177 @@ #include /*! - \class vpDetectorAprilTag - \ingroup group_detection_tag - Base class for AprilTag detector. This class is a wrapper over AprilTag. There - is no need to download and install AprilTag from source code or existing - pre-built packages since the source code is embedded in ViSP. Reference papers - are AprilTag: A robust and flexible visual fiducial system - (\cite olson2011tags), AprilTag 2: Efficient and robust fiducial - detection (\cite wang2016iros) and Flexible Layouts for Fiducial Tags - (Under Review) (\cite krogius2019iros). - - The detect() function allows to detect multiple tags in an image. 
Once - detected, for each tag it is possible to retrieve the location of the corners - using getPolygon(), the encoded message using getMessage(), the bounding box - using getBBox() and the center of gravity using getCog(). - - If camera parameters and the size of the tag are provided, you can also estimate - the 3D pose of the tag in terms of position and orientation wrt the camera considering 2 cases: - - 1. If all the tags have the same size use - detect(const vpImage &, const double, const vpCameraParameters &, std::vector &) - - 2. If tag sizes differ, use rather getPose() - - The following sample code shows how to use this class to detect the location - of 36h11 AprilTag patterns in an image. -\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_APRILTAG - vpImage I; - vpImageIo::read(I, "image-tag36h11.pgm"); - - vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); - - bool status = detector.detect(I); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Tag code " << i << ":" << std::endl; - std::vector p = detector.getPolygon(i); - for(size_t j=0; j < p.size(); j++) - std::cout << " Point " << j << ": " << p[j] << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - } - } -#endif -} - \endcode - - The previous example may produce results like: - \code -Tag code 0: - Point 0: 124.008, 442.226 - Point 1: 194.614, 441.237 - Point 2: 184.833, 540.386 - Point 3: 111.948, 533.634 - Message: "36h11 id: 0" -Tag code 1: - Point 0: 245.327, 438.801 - Point 1: 338.116, 437.221 - Point 2: 339.341, 553.539 - Point 3: 238.954, 543.855 - Message: "36h11 id: 1" - \endcode - - This other example shows how to estimate the 3D pose of 36h11 AprilTag - patterns considering that all the tags have the same size (in our example 0.053 m). 
-\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_APRILTAG - vpImage I; - vpImageIo::read(I, "image-tag36h11.pgm"); - - vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); - std::vector cMo; - vpCameraParameters cam; - cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); - double tagSize = 0.053; - - bool status = detector.detect(I, tagSize, cam, cMo); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Tag number " << i << ":" << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - std::cout << " Pose: " << vpPoseVector(cMo[i]).t() << std::endl; - std::size_t tag_id_pos = detector.getMessage(i).find("id: "); - if (tag_id_pos != std::string::npos) { - std::string tag_id = detector.getMessage(i).substr(tag_id_pos + 4); - std::cout << " Tag Id: " << tag_id << std::endl; - } - } - } -#endif -} - \endcode - The previous example may produce results like: - \code -Tag number 0: - Message: "36h11 id: 0" - Pose: 0.1015061088 -0.05239057228 0.3549037285 1.991474322 2.04143538 -0.9412360063 - Tag Id: 0 -Tag number 1: - Message: "36h11 id: 1" - Pose: 0.08951250829 0.02243780207 0.306540622 1.998073197 2.061488008 -0.8699567948 - Tag Id: 1 -\endcode - - In this other example we estimate the 3D pose of 36h11 AprilTag - patterns considering that tag 36h11 with id 0 (in that case the tag message is "36h11 id: 0") - has a size of 0.040 m, while all the others have a size of 0.053m. 
-\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_APRILTAG - vpImage I; - vpImageIo::read(I, "image-tag36h11.pgm"); - - vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); - vpHomogeneousMatrix cMo; - vpCameraParameters cam; - cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); - double tagSize_id_0 = 0.04; - double tagSize_id_others = 0.053; - - bool status = detector.detect(I); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Tag code " << i << ":" << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - if (detector.getMessage(i) == std::string("36h11 id: 0")) { - if (! detector.getPose(i, tagSize_id_0, cam, cMo)) { - std::cout << "Unable to get tag index " << i << " pose!" << std::endl; - } - } - else { - if (! detector.getPose(i, tagSize_id_others, cam, cMo)) { - std::cout << "Unable to get tag index " << i << " pose!" << std::endl; - } - } - std::cout << " Pose: " << vpPoseVector(cMo).t() << std::endl; - } - } -#endif -} -\endcode - With respect to the previous example, this example may now produce a different pose for tag with id 0: - \code -Tag code 0: - Message: "36h11 id: 0" - Pose: 0.07660838403 -0.03954005455 0.2678518706 1.991474322 2.04143538 -0.9412360063 -Tag code 1: - Message: "36h11 id: 1" - Pose: 0.08951250829 0.02243780207 0.306540622 1.998073197 2.061488008 -0.8699567948 -\endcode - - Other examples are also provided in tutorial-apriltag-detector.cpp and - tutorial-apriltag-detector-live.cpp -*/ + * \class vpDetectorAprilTag + * \ingroup group_detection_tag + * Base class for AprilTag detector. This class is a wrapper over AprilTag. There + * is no need to download and install AprilTag from source code or existing + * pre-built packages since the source code is embedded in ViSP. 
Reference papers + * are AprilTag: A robust and flexible visual fiducial system + * (\cite olson2011tags), AprilTag 2: Efficient and robust fiducial + * detection (\cite wang2016iros) and Flexible Layouts for Fiducial Tags + * (Under Review) (\cite krogius2019iros). + * + * The detect() function allows to detect multiple tags in an image. Once + * detected, for each tag it is possible to retrieve the location of the corners + * using getPolygon(), the encoded message using getMessage(), the bounding box + * using getBBox() and the center of gravity using getCog(). + * + * If camera parameters and the size of the tag are provided, you can also estimate + * the 3D pose of the tag in terms of position and orientation wrt the camera considering 2 cases: + * - 1. If all the tags have the same size use + * detect(const vpImage &, const double, const vpCameraParameters &, std::vector &) + * - 2. If tag sizes differ, use rather getPose() + * + * The following sample code shows how to use this class to detect the location + * of 36h11 AprilTag patterns in an image. 
+ * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_APRILTAG + * vpImage I; + * vpImageIo::read(I, "image-tag36h11.pgm"); + * + * vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); + * + * bool status = detector.detect(I); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Tag code " << i << ":" << std::endl; + * std::vector p = detector.getPolygon(i); + * for(size_t j=0; j < p.size(); j++) + * std::cout << " Point " << j << ": " << p[j] << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * } + * } + * #endif + * } + * \endcode + * + * The previous example may produce results like: + * \code + * Tag code 0: + * Point 0: 124.008, 442.226 + * Point 1: 194.614, 441.237 + * Point 2: 184.833, 540.386 + * Point 3: 111.948, 533.634 + * Message: "36h11 id: 0" + * Tag code 1: + * Point 0: 245.327, 438.801 + * Point 1: 338.116, 437.221 + * Point 2: 339.341, 553.539 + * Point 3: 238.954, 543.855 + * Message: "36h11 id: 1" + * \endcode + * + * This other example shows how to estimate the 3D pose of 36h11 AprilTag + * patterns considering that all the tags have the same size (in our example 0.053 m). 
+ * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_APRILTAG + * vpImage I; + * vpImageIo::read(I, "image-tag36h11.pgm"); + * + * vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); + * std::vector cMo; + * vpCameraParameters cam; + * cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); + * double tagSize = 0.053; + * + * bool status = detector.detect(I, tagSize, cam, cMo); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Tag number " << i << ":" << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * std::cout << " Pose: " << vpPoseVector(cMo[i]).t() << std::endl; + * std::size_t tag_id_pos = detector.getMessage(i).find("id: "); + * if (tag_id_pos != std::string::npos) { + * std::string tag_id = detector.getMessage(i).substr(tag_id_pos + 4); + * std::cout << " Tag Id: " << tag_id << std::endl; + * } + * } + * } + * #endif + * } + * \endcode + * The previous example may produce results like: + * \code + * Tag number 0: + * Message: "36h11 id: 0" + * Pose: 0.1015061088 -0.05239057228 0.3549037285 1.991474322 2.04143538 -0.9412360063 + * Tag Id: 0 + * Tag number 1: + * Message: "36h11 id: 1" + * Pose: 0.08951250829 0.02243780207 0.306540622 1.998073197 2.061488008 -0.8699567948 + * Tag Id: 1 + * \endcode + * + * In this other example we estimate the 3D pose of 36h11 AprilTag + * patterns considering that tag 36h11 with id 0 (in that case the tag message is "36h11 id: 0") + * has a size of 0.040 m, while all the others have a size of 0.053m. 
+ * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_APRILTAG + * vpImage I; + * vpImageIo::read(I, "image-tag36h11.pgm"); + * + * vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); + * vpHomogeneousMatrix cMo; + * vpCameraParameters cam; + * cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); + * double tagSize_id_0 = 0.04; + * double tagSize_id_others = 0.053; + * + * bool status = detector.detect(I); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Tag code " << i << ":" << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * if (detector.getMessage(i) == std::string("36h11 id: 0")) { + * if (! detector.getPose(i, tagSize_id_0, cam, cMo)) { + * std::cout << "Unable to get tag index " << i << " pose!" << std::endl; + * } + * } + * else { + * if (! detector.getPose(i, tagSize_id_others, cam, cMo)) { + * std::cout << "Unable to get tag index " << i << " pose!" 
<< std::endl; + * } + * } + * std::cout << " Pose: " << vpPoseVector(cMo).t() << std::endl; + * } + * } + * #endif + * } + * \endcode + * With respect to the previous example, this example may now produce a different pose for tag with id 0: + * \code + * Tag code 0: + * Message: "36h11 id: 0" + * Pose: 0.07660838403 -0.03954005455 0.2678518706 1.991474322 2.04143538 -0.9412360063 + * Tag code 1: + * Message: "36h11 id: 1" + * Pose: 0.08951250829 0.02243780207 0.306540622 1.998073197 2.061488008 -0.8699567948 + * \endcode + * + * Other examples are also provided in tutorial-apriltag-detector.cpp and + * tutorial-apriltag-detector-live.cpp + */ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase { public: - enum vpAprilTagFamily { + enum vpAprilTagFamily + { TAG_36h11, ///< AprilTag 36h11 pattern (recommended) TAG_36h10, ///< DEPRECATED TAG_36ARTOOLKIT, ///< DEPRECATED AND WILL NOT DETECT ARTOOLKIT TAGS @@ -230,7 +231,8 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase TAG_STANDARD52h13 ///< AprilTag Standard52h13 pattern }; - enum vpPoseEstimationMethod { + enum vpPoseEstimationMethod + { HOMOGRAPHY, /*!< Pose from homography */ HOMOGRAPHY_VIRTUAL_VS, /*!< Non linear virtual visual servoing approach initialized by the homography approach */ @@ -248,9 +250,9 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase const vpPoseEstimationMethod &poseEstimationMethod = HOMOGRAPHY_VIRTUAL_VS); vpDetectorAprilTag(const vpDetectorAprilTag &o); vpDetectorAprilTag &operator=(vpDetectorAprilTag o); - virtual ~vpDetectorAprilTag(); + virtual ~vpDetectorAprilTag() override; - bool detect(const vpImage &I); + bool detect(const vpImage &I) override; bool detect(const vpImage &I, double tagSize, const vpCameraParameters &cam, std::vector &cMo_vec, std::vector *cMo_vec2 = NULL, std::vector *projErrors = NULL, std::vector *projErrors2 = NULL); @@ -269,8 +271,8 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase vpHomogeneousMatrix *cMo2 = 
NULL, double *projError = NULL, double *projError2 = NULL); /*! - Return the pose estimation method. - */ + * Return the pose estimation method. + */ inline vpPoseEstimationMethod getPoseEstimationMethod() const { return m_poseEstimationMethod; } std::vector > getTagsCorners() const; @@ -288,8 +290,10 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase void setAprilTagRefineEdges(bool refineEdges); void setAprilTagRefinePose(bool refinePose); - /*! Allow to enable the display of overlay tag information in the windows - * (vpDisplay) associated to the input image. */ + /*! + * Allow to enable the display of overlay tag information in the windows + * (vpDisplay) associated to the input image. + */ inline void setDisplayTag(bool display, const vpColor &color = vpColor::none, unsigned int thickness = 2) { m_displayTag = display; diff --git a/modules/detection/include/visp3/detection/vpDetectorBase.h b/modules/detection/include/visp3/detection/vpDetectorBase.h index 3f9b4e46c9..ceb1496a7c 100644 --- a/modules/detection/include/visp3/detection/vpDetectorBase.h +++ b/modules/detection/include/visp3/detection/vpDetectorBase.h @@ -46,17 +46,18 @@ #include /*! - \class vpDetectorBase - \ingroup group_detection_barcode group_detection_face - Base class for object detection. - - This class is a generic class that can be used to detect: - - bar codes like QRcodes of Data matrices. The example given in - tutorial-barcode-detector.cpp shows how to detect one or more bar codes in - an image. In tutorial-barcode-detector-live.cpp you will find an other - example that shows how to use this class to detect bar codes in images - acquired by a camera. - - faces. An example is provided in tutorial-face-detector-live.cpp. + * \class vpDetectorBase + * \ingroup group_detection_barcode group_detection_face + * Base class for object detection. 
+ * + * This class is a generic class that can be used to detect: + * - bar codes like QRcodes of Data matrices using vpDetectorQRCode and vpDetectorDataMatrixCode classes respectively. + * The example given in tutorial-barcode-detector.cpp shows how to detect one or more bar codes in + * an image. In tutorial-barcode-detector-live.cpp you will find an other + * example that shows how to use this class to detect bar codes in images + * acquired by a camera. + * - AprilTags using vpDetectorAprilTag class + * - faces using vpDetectorFace. An example is provided in tutorial-face-detector-live.cpp. */ class VISP_EXPORT vpDetectorBase { @@ -68,19 +69,19 @@ class VISP_EXPORT vpDetectorBase public: /*! - Default constructor. + * Default constructor. */ vpDetectorBase(); vpDetectorBase(const vpDetectorBase &o); /*! - Default destructor. - */ - virtual ~vpDetectorBase() {} + * Default destructor. + */ + virtual ~vpDetectorBase() { } /*! - Detect objects in an image. - \param I : Image where to detect objects. - \return true if one or multiple objects are detected, false otherwise. + * Detect objects in an image. + * \param I : Image where to detect objects. + * \return true if one or multiple objects are detected, false otherwise. */ virtual bool detect(const vpImage &I) = 0; @@ -88,41 +89,43 @@ class VISP_EXPORT vpDetectorBase //@{ /*! - Return the bounding box of the ith object. + * Return the bounding box of the ith object. */ vpRect getBBox(size_t i) const; /*! - Return the center of gravity location of the ith object. + * Return the center of gravity location of the ith object. */ vpImagePoint getCog(size_t i) const; /*! - Returns the contained message of the ith object if there is one. + * Returns the contained message of the ith object if there is one. */ std::vector &getMessage() { return m_message; } /*! - Returns the contained message of the ith object if there is one. + * Returns the contained message of the ith object if there is one. 
*/ std::string &getMessage(size_t i); /*! - Return the number of objects that are detected. - */ + * Return the number of objects that are detected. + */ size_t getNbObjects() const { return m_nb_objects; } /*! - Returns object container box as a vector of points. + * Returns object container box as a vector of points. */ std::vector > &getPolygon() { return m_polygon; } /*! - Returns ith object container box as a vector of points. + * Returns ith object container box as a vector of points. */ std::vector &getPolygon(size_t i); - /*! Set detector timeout in milli-seconds. When set to 0, there is no timeout. */ + /*! + * Set detector timeout in milli-seconds. When set to 0, there is no timeout. + */ inline void setTimeout(unsigned long timeout_ms) { m_timeout_ms = timeout_ms; } //@} diff --git a/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h b/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h index 09c8ef7603..4cdc12fb22 100644 --- a/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h +++ b/modules/detection/include/visp3/detection/vpDetectorDNNOpenCV.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * DNN object detection using OpenCV DNN module. - * -*****************************************************************************/ + */ #ifndef _vpDetectorDNN_h_ #define _vpDetectorDNN_h_ @@ -56,27 +54,27 @@ using json = nlohmann::json; //! json namespace shortcut #endif /*! - \class vpDetectorDNNOpenCV - \ingroup group_detection_dnn - This class is a wrapper over the - OpenCV DNN module and specialized to handle object detection task. 
- - This class supports the following networks dedicated to object detection: - - - Faster-RCNN, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_faster_rcnn network - - SSD MobileNet, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_mobilenet_ssd network - - ResNet 10, see usage for \ref dnn_usecase_face_detection - - Yolo v3, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov3 network - - Yolo v4, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov4 network - - Yolo v5, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov5 network - - Yolo v7, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov7 network - - Yolo v8, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov8 network - - This class can be initialized from a JSON file if ViSP has been compiled with NLOHMANN JSON (see \ref soft_tool_json to see how to do it). - Examples of such JSON files can be found in the tutorial folder. - - \sa \ref tutorial-detection-dnn -*/ + * \class vpDetectorDNNOpenCV + * \ingroup group_detection_dnn + * This class is a wrapper over the + * OpenCV DNN module and specialized to handle object detection task. 
+ * + * This class supports the following networks dedicated to object detection: + * + * - Faster-RCNN, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_faster_rcnn network + * - SSD MobileNet, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_mobilenet_ssd network + * - ResNet 10, see usage for \ref dnn_usecase_face_detection + * - Yolo v3, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov3 network + * - Yolo v4, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov4 network + * - Yolo v5, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov5 network + * - Yolo v7, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov7 network + * - Yolo v8, see usage to detect objects belonging to the COCO dataset using \ref dnn_supported_yolov8 network + * + * This class can be initialized from a JSON file if ViSP has been compiled with NLOHMANN JSON (see \ref soft_tool_json to see how to do it). + * Examples of such JSON files can be found in the tutorial folder. + * + * \sa \ref tutorial-detection-dnn + */ class VISP_EXPORT vpDetectorDNNOpenCV { public: diff --git a/modules/detection/include/visp3/detection/vpDetectorDataMatrixCode.h b/modules/detection/include/visp3/detection/vpDetectorDataMatrixCode.h index ef8993ffff..a7005566e3 100644 --- a/modules/detection/include/visp3/detection/vpDetectorDataMatrixCode.h +++ b/modules/detection/include/visp3/detection/vpDetectorDataMatrixCode.h @@ -48,71 +48,69 @@ #include /*! - \class vpDetectorDataMatrixCode - \ingroup group_detection_barcode - Base class for bar code detector. This class is a wrapper over libdmtx - available from http://www.libdmtx.org. Installation instructions are - provided here https://visp.inria.fr/3rd_dmtx. - - The detect() function allows to detect multiple QR codes in an image. 
Once - detected, for each QR code it is possible to retrieve the location of the - corners using getPolygon(), the encoded message using getMessage(), the - bounding box using getBBox() and the center of gravity using getCog(). - - The following sample code shows how to use this class to detect QR codes in - an image. -\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_DMTX - vpImage I; - vpImageIo::read(I, "bar-code.pgm"); - - vpDetectorDataMatrixCode detector; - - bool status = detector.detect(I); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Bar code " << i << ":" << std::endl; - std::vector p = detector.getPolygon(i); - for(size_t j=0; j < p.size(); j++) - std::cout << " Point " << j << ": " << p[j] << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - } - } -#endif -} - \endcode - - The previous example may produce results like: - \code -Bar code 0: - Point 0: 273.21, 78.9799 - Point 1: 390.016, 85.1014 - Point 2: 388.024, 199.185 - Point 3: 269.23, 192.96 - Message: "datamatrix 1" -Bar code 1: - Point 0: 262.23, 396.404 - Point 1: 381.041, 402.631 - Point 2: 378.92, 524.188 - Point 3: 257.916, 519.962 - Message: "datamatrix 2" - \endcode - - Other examples are also provided in tutorial-barcode-detector.cpp and - tutorial-barcode-detector-live.cpp - + * \class vpDetectorDataMatrixCode + * \ingroup group_detection_barcode + * Base class for bar code detector. This class is a wrapper over libdmtx + * available from http://www.libdmtx.org. Installation instructions are + * provided here https://visp.inria.fr/3rd_dmtx. + * + * The detect() function allows to detect multiple QR codes in an image. Once + * detected, for each QR code it is possible to retrieve the location of the + * corners using getPolygon(), the encoded message using getMessage(), the + * bounding box using getBBox() and the center of gravity using getCog(). 
+ * + * The following sample code shows how to use this class to detect QR codes in + * an image. + * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_DMTX + * vpImage I; + * vpImageIo::read(I, "bar-code.pgm"); + * + * vpDetectorDataMatrixCode detector; + * + * bool status = detector.detect(I); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Bar code " << i << ":" << std::endl; + * std::vector p = detector.getPolygon(i); + * for(size_t j=0; j < p.size(); j++) + * std::cout << " Point " << j << ": " << p[j] << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * } + * } + * #endif + * } + * \endcode + * + * The previous example may produce results like: + * \code + * Bar code 0: + * Point 0: 273.21, 78.9799 + * Point 1: 390.016, 85.1014 + * Point 2: 388.024, 199.185 + * Point 3: 269.23, 192.96 + * Message: "datamatrix 1" + * Bar code 1: + * Point 0: 262.23, 396.404 + * Point 1: 381.041, 402.631 + * Point 2: 378.92, 524.188 + * Point 3: 257.916, 519.962 + * Message: "datamatrix 2" + * \endcode + * + * Other examples are also provided in tutorial-barcode-detector.cpp and + * tutorial-barcode-detector-live.cpp */ class VISP_EXPORT vpDetectorDataMatrixCode : public vpDetectorBase { public: vpDetectorDataMatrixCode(); - virtual ~vpDetectorDataMatrixCode(){}; - bool detect(const vpImage &I); + bool detect(const vpImage &I) override; }; #endif diff --git a/modules/detection/include/visp3/detection/vpDetectorFace.h b/modules/detection/include/visp3/detection/vpDetectorFace.h index 2a2fae5b45..313aab4d9a 100644 --- a/modules/detection/include/visp3/detection/vpDetectorFace.h +++ b/modules/detection/include/visp3/detection/vpDetectorFace.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. 
* @@ -30,8 +29,7 @@ * * Description: * Detect faces. - * -*****************************************************************************/ + */ #ifndef _vpDetectorFace_h_ #define _vpDetectorFace_h_ @@ -49,40 +47,40 @@ #include /*! - \class vpDetectorFace - \ingroup group_detection_face - The vpDetectorFace class is a wrapper over OpenCV Haar cascade face - detection capabilities. To use this class ViSP should be build against - OpenCV 2.2.0 or a more recent version. Installation instructions are provided - here https://visp.inria.fr/3rd_opencv. - - The following sample code shows how to use this class to detect the largest - face in the image. The cascade classifier file - "haarcascade_frontalface_alt.xml" can be found in ViSP source code or in - OpenCV. -\code -#include - -int main() -{ -#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_OBJDETECT) - vpImage I; - vpDetectorFace face_detector; - face_detector.setCascadeClassifierFile("haarcascade_frontalface_alt.xml"); - - while(1) { - // Acquire a new image in I - bool face_found = face_detector.detect(I); - if (face_found) { - vpRect face_bbox = face_detector.getBBox(0); // largest face has index 0 - } - } -#endif -} - \endcode - - A more complete example that works with images acquired from a camera is - provided in tutorial-face-detector-live.cpp. + * \class vpDetectorFace + * \ingroup group_detection_face + * The vpDetectorFace class is a wrapper over OpenCV Haar cascade face + * detection capabilities. To use this class ViSP should be build against + * OpenCV 2.2.0 or a more recent version. Installation instructions are provided + * here https://visp.inria.fr/3rd_opencv. + * + * The following sample code shows how to use this class to detect the largest + * face in the image. The cascade classifier file + * "haarcascade_frontalface_alt.xml" can be found in ViSP source code or in + * OpenCV. 
+ * \code + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_OBJDETECT) + * vpImage I; + * vpDetectorFace face_detector; + * face_detector.setCascadeClassifierFile("haarcascade_frontalface_alt.xml"); + * + * while(1) { + * // Acquire a new image in I + * bool face_found = face_detector.detect(I); + * if (face_found) { + * vpRect face_bbox = face_detector.getBBox(0); // largest face has index 0 + * } + * } + * #endif + * } + * \endcode + * + * A more complete example that works with images acquired from a camera is + * provided in tutorial-face-detector-live.cpp. */ class VISP_EXPORT vpDetectorFace : public vpDetectorBase { @@ -93,12 +91,8 @@ class VISP_EXPORT vpDetectorFace : public vpDetectorBase public: vpDetectorFace(); - /*! - Default destructor. - */ - virtual ~vpDetectorFace() { } - bool detect(const vpImage &I); + bool detect(const vpImage &I) override; bool detect(const cv::Mat &frame_gray); void setCascadeClassifierFile(const std::string &filename); }; diff --git a/modules/detection/include/visp3/detection/vpDetectorQRCode.h b/modules/detection/include/visp3/detection/vpDetectorQRCode.h index 9650c0ff83..a2942cee5a 100644 --- a/modules/detection/include/visp3/detection/vpDetectorQRCode.h +++ b/modules/detection/include/visp3/detection/vpDetectorQRCode.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Base class for bar code detection. - * -*****************************************************************************/ + */ #ifndef _vpDetectorQRCode_h_ #define _vpDetectorQRCode_h_ @@ -50,63 +48,63 @@ #include /*! - \class vpDetectorQRCode - \ingroup group_detection_barcode - Base class for bar code detector. This class is a wrapper over libzbar - available from http://zbar.sourceforge.net. 
Installation instructions are -provided here https://visp.inria.fr/3rd_zbar. - - The detect() function allows to detect multiple QR codes in an image. Once -detected, for each QR code it is possible to retrieve the location of the -corners using getPolygon(), the encoded message using getMessage(), the -bounding box using getBBox() and the center of gravity using getCog(). - - The following sample code shows how to use this class to detect QR codes in -an image. -\code -#include -#include - -int main() -{ -#ifdef VISP_HAVE_ZBAR - vpImage I; - vpImageIo::read(I, "bar-code.pgm"); - - vpDetectorQRCode detector; - - bool status = detector.detect(I); - if (status) { - for(size_t i=0; i < detector.getNbObjects(); i++) { - std::cout << "Bar code " << i << ":" << std::endl; - std::vector p = detector.getPolygon(i); - for(size_t j=0; j < p.size(); j++) - std::cout << " Point " << j << ": " << p[j] << std::endl; - std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; - } - } -#endif -} - \endcode - - The previous example may produce results like: - \code -Bar code 0: - Point 0: 48, 212 - Point 1: 57, 84 - Point 2: 188, 92 - Point 3: 183, 220 - Message: "qrcode 2" -Bar code 1: - Point 0: 26, 550 - Point 1: 35, 409 - Point 2: 174, 414 - Point 3: 167, 555 - Message: "qrcode 1" - \endcode - - Other examples are also provided in tutorial-barcode-detector.cpp and - tutorial-barcode-detector-live.cpp + * \class vpDetectorQRCode + * \ingroup group_detection_barcode + * Base class for bar code detector. This class is a wrapper over libzbar + * available from http://zbar.sourceforge.net. Installation instructions are + * provided here https://visp.inria.fr/3rd_zbar. + * + * The detect() function allows to detect multiple QR codes in an image. 
Once + * detected, for each QR code it is possible to retrieve the location of the + * corners using getPolygon(), the encoded message using getMessage(), the + * bounding box using getBBox() and the center of gravity using getCog(). + * + * The following sample code shows how to use this class to detect QR codes in + * an image. + * \code + * #include + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_ZBAR + * vpImage I; + * vpImageIo::read(I, "bar-code.pgm"); + * + * vpDetectorQRCode detector; + * + * bool status = detector.detect(I); + * if (status) { + * for(size_t i=0; i < detector.getNbObjects(); i++) { + * std::cout << "Bar code " << i << ":" << std::endl; + * std::vector p = detector.getPolygon(i); + * for(size_t j=0; j < p.size(); j++) + * std::cout << " Point " << j << ": " << p[j] << std::endl; + * std::cout << " Message: \"" << detector.getMessage(i) << "\"" << std::endl; + * } + * } + * #endif + * } + * \endcode + * + * The previous example may produce results like: + * \code + * Bar code 0: + * Point 0: 48, 212 + * Point 1: 57, 84 + * Point 2: 188, 92 + * Point 3: 183, 220 + * Message: "qrcode 2" + * Bar code 1: + * Point 0: 26, 550 + * Point 1: 35, 409 + * Point 2: 174, 414 + * Point 3: 167, 555 + * Message: "qrcode 1" + * \endcode + * + * Other examples are also provided in tutorial-barcode-detector.cpp and + * tutorial-barcode-detector-live.cpp */ class VISP_EXPORT vpDetectorQRCode : public vpDetectorBase { @@ -115,8 +113,7 @@ class VISP_EXPORT vpDetectorQRCode : public vpDetectorBase public: vpDetectorQRCode(); - virtual ~vpDetectorQRCode() {} - bool detect(const vpImage &I); + bool detect(const vpImage &I) override; }; #endif diff --git a/modules/gui/include/visp3/gui/vpD3DRenderer.h b/modules/gui/include/visp3/gui/vpD3DRenderer.h index 6826dc71d3..06dcb884fc 100644 --- a/modules/gui/include/visp3/gui/vpD3DRenderer.h +++ b/modules/gui/include/visp3/gui/vpD3DRenderer.h @@ -1,5 +1,4 @@ 
-/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * D3D renderer for windows 32 display - * - * Authors: - * Bruno Renier - * -*****************************************************************************/ + */ #ifndef DOXYGEN_SHOULD_SKIP_THIS @@ -56,15 +51,13 @@ #include /*! - \class vpD3DRenderer.h - - \brief Display under windows using Direct3D9. - Is used by vpDisplayD3D to do the drawing. - -*/ + * \class vpD3DRenderer.h + * + * \brief Display under windows using Direct3D9. + * Is used by vpDisplayD3D to do the drawing. + */ class VISP_EXPORT vpD3DRenderer : public vpWin32Renderer { - IDirect3D9 *pD3D; // The d3d device we will be working with. @@ -87,7 +80,7 @@ class VISP_EXPORT vpD3DRenderer : public vpWin32Renderer // The window's handle. HWND hWnd; - // Colors for overlay drawn with d3d directly. + // Colors for overlay drawn with d3d directly. unsigned long colors[vpColor::id_unknown]; // Colors for overlay drawn with GDI. 
@@ -101,7 +94,7 @@ class VISP_EXPORT vpD3DRenderer : public vpWin32Renderer bool render(); vpD3DRenderer(); - virtual ~vpD3DRenderer(); + virtual ~vpD3DRenderer() override; void setImg(const vpImage &im); void setImg(const vpImage &im); @@ -128,7 +121,7 @@ class VISP_EXPORT vpD3DRenderer : public vpWin32Renderer void drawArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int w, unsigned int h, unsigned int thickness = 1); - void getImage(vpImage &I); + void getImage(vpImage &I) override; private: void initView(float, float); diff --git a/modules/gui/include/visp3/gui/vpDisplayD3D.h b/modules/gui/include/visp3/gui/vpDisplayD3D.h index 169a3c945c..b2cde98f8b 100644 --- a/modules/gui/include/visp3/gui/vpDisplayD3D.h +++ b/modules/gui/include/visp3/gui/vpDisplayD3D.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Windows 32 display using D3D - * - * Authors: - * Bruno Renier - * -*****************************************************************************/ + */ #include #include @@ -46,62 +41,62 @@ #include /*! - \class vpDisplayD3D - - \ingroup group_gui_display - - \brief Display for windows using Direct3D 3rd party. Thus to enable this -class Direct3D should be installed. Installation instructions are provided -here https://visp.inria.fr/3rd_d3d/ - - Direct3D is part of the DirectX API available under Windows - operating systems. - - \warning Requires DirectX9 SDK to compile and DirectX9 DLLs to run. - - The example below shows how to display an image with this video device. 
- \code -#include -#include - -int main() -{ -#if defined(VISP_HAVE_D3D9) - vpImage I; // Grey level image - - // Read an image in PGM P5 format - vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); - - vpDisplayD3D d; - - // Initialize the display with the image I. Display and image are - // now link together. - d.init(I); - - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My Direct 3D display"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(10); - topLeftCorner.set_j(20); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::red, true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Wait for a click in the display window - vpDisplay::getClick(I); -#endif -} - \endcode -*/ + * \class vpDisplayD3D + * + * \ingroup group_gui_display + * + * \brief Display for windows using Direct3D 3rd party. Thus to enable this + * class Direct3D should be installed. Installation instructions are provided + * here https://visp.inria.fr/3rd_d3d/ + * + * Direct3D is part of the DirectX API available under Windows + * operating systems. + * + * \warning Requires DirectX9 SDK to compile and DirectX9 DLLs to run. + * + * The example below shows how to display an image with this video device. + * \code + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_D3D9) + * vpImage I; // Grey level image + * + * // Read an image in PGM P5 format + * vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); + * + * vpDisplayD3D d; + * + * // Initialize the display with the image I. Display and image are + * // now link together. 
+ * d.init(I); + * + * // Specify the window location + * vpDisplay::setWindowPosition(I, 400, 100); + * + * // Set the display window title + * vpDisplay::setTitle(I, "My Direct 3D display"); + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpImagePoint topLeftCorner; + * topLeftCorner.set_i(10); + * topLeftCorner.set_j(20); + * vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::red, true); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // Wait for a click in the display window + * vpDisplay::getClick(I); + * #endif + * } + * \endcode + */ class VISP_EXPORT vpDisplayD3D : public vpDisplayWin32 { public: @@ -114,7 +109,6 @@ class VISP_EXPORT vpDisplayD3D : public vpDisplayWin32 vpDisplayD3D(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "", vpScaleType type = SCALE_DEFAULT); - virtual ~vpDisplayD3D(); }; #endif #endif diff --git a/modules/gui/include/visp3/gui/vpDisplayGDI.h b/modules/gui/include/visp3/gui/vpDisplayGDI.h index 883ef2ff1d..597b893753 100644 --- a/modules/gui/include/visp3/gui/vpDisplayGDI.h +++ b/modules/gui/include/visp3/gui/vpDisplayGDI.h @@ -46,84 +46,84 @@ #include /*! - \class vpDisplayGDI - - \ingroup group_gui_display - - \brief Display for windows using GDI (available on any windows 32 platform). - - GDI stands for Graphics Device Interface and is a core component of -Microsoft Windows operating systems used for displaying graphics in a window. - - The example below shows how to display an image with this video device. 
- \code -#include -#include - -int main() -{ -#if defined(VISP_HAVE_GDI) - vpImage I; // Grey level image - - // Read an image in PGM P5 format -#ifdef _WIN32 - vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); -#else - vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); -#endif - - vpDisplayGDI d; - - // Initialize the display with the image I. Display and image are - // now link together. - d.init(I); - - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My GDI display"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(50); - topLeftCorner.set_j(10); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Get non blocking keyboard events - std::cout << "Check keyboard events..." << std::endl; - char key[10]; - bool ret; - for (int i=0; i< 200; i++) { - bool ret = vpDisplay::getKeyboardEvent(I, key, false); - if (ret) - std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; - vpTime::wait(40); - } - - // Get a blocking keyboard event - std::cout << "Wait for a keyboard event..." << std::endl; - ret = vpDisplay::getKeyboardEvent(I, key, true); - std::cout << "keyboard event: " << ret << std::endl; - if (ret) - std::cout << "key: " << "\"" << key << "\"" << std::endl; - - // Wait for a click in the display window - std::cout << "Wait for a button click..." 
<< std::endl; - vpDisplay::getClick(I); -#endif -} - \endcode -*/ + * \class vpDisplayGDI + * + * \ingroup group_gui_display + * + * \brief Display for windows using GDI (available on any windows 32 platform). + * + * GDI stands for Graphics Device Interface and is a core component of + * Microsoft Windows operating systems used for displaying graphics in a window. + * + * The example below shows how to display an image with this video device. + * \code + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_GDI) + * vpImage I; // Grey level image + * + * // Read an image in PGM P5 format + * #ifdef _WIN32 + * vpImageIo::read(I, "C:/Temp/visp-images/Klimt/Klimt.pgm"); + * #else + * vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); + * #endif + * + * vpDisplayGDI d; + * + * // Initialize the display with the image I. Display and image are + * // now link together. + * d.init(I); + * + * // Specify the window location + * vpDisplay::setWindowPosition(I, 400, 100); + * + * // Set the display window title + * vpDisplay::setTitle(I, "My GDI display"); + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpImagePoint topLeftCorner; + * topLeftCorner.set_i(50); + * topLeftCorner.set_j(10); + * vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // Get non blocking keyboard events + * std::cout << "Check keyboard events..." 
<< std::endl; + * char key[10]; + * bool ret; + * for (int i=0; i< 200; i++) { + * bool ret = vpDisplay::getKeyboardEvent(I, key, false); + * if (ret) + * std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; + * vpTime::wait(40); + * } + * + * // Get a blocking keyboard event + * std::cout << "Wait for a keyboard event..." << std::endl; + * ret = vpDisplay::getKeyboardEvent(I, key, true); + * std::cout << "keyboard event: " << ret << std::endl; + * if (ret) + * std::cout << "key: " << "\"" << key << "\"" << std::endl; + * + * // Wait for a click in the display window + * std::cout << "Wait for a button click..." << std::endl; + * vpDisplay::getClick(I); + * #endif + * } + * \endcode + */ class VISP_EXPORT vpDisplayGDI : public vpDisplayWin32 { public: @@ -135,8 +135,6 @@ class VISP_EXPORT vpDisplayGDI : public vpDisplayWin32 vpDisplayGDI(vpImage &I, vpScaleType type); vpDisplayGDI(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "", vpScaleType type = SCALE_DEFAULT); - - virtual ~vpDisplayGDI(); }; #endif diff --git a/modules/gui/include/visp3/gui/vpDisplayGTK.h b/modules/gui/include/visp3/gui/vpDisplayGTK.h index b0fcda5406..cef6d6611a 100644 --- a/modules/gui/include/visp3/gui/vpDisplayGTK.h +++ b/modules/gui/include/visp3/gui/vpDisplayGTK.h @@ -127,7 +127,8 @@ class VISP_EXPORT vpDisplayGTK : public vpDisplay { private: - typedef enum { + typedef enum + { id_black = 0, id_white, id_lightGray, @@ -159,66 +160,66 @@ class VISP_EXPORT vpDisplayGTK : public vpDisplay vpDisplayGTK(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "", vpScaleType type = SCALE_DEFAULT); - virtual ~vpDisplayGTK(); + virtual ~vpDisplayGTK() override; - void getImage(vpImage &I); + void getImage(vpImage &I) override; unsigned int getScreenDepth(); - unsigned int getScreenHeight(); - void getScreenSize(unsigned int &screen_width, unsigned int &screen_height); - unsigned int getScreenWidth(); + unsigned int getScreenHeight() 
override; + void getScreenSize(unsigned int &screen_width, unsigned int &screen_height) override; + unsigned int getScreenWidth() override; - void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = ""); - void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = ""); + void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "") override; + void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "") override; void init(unsigned int win_width, unsigned int win_height, int win_x = -1, int win_y = -1, - const std::string &win_title = ""); + const std::string &win_title = "") override; protected: - void setFont(const std::string &fontname); - void setTitle(const std::string &win_title); - void setWindowPosition(int win_x, int win_y); + void setFont(const std::string &fontname) override; + void setTitle(const std::string &win_title) override; + void setWindowPosition(int win_x, int win_y) override; - void clearDisplay(const vpColor &color = vpColor::white); + void clearDisplay(const vpColor &color = vpColor::white) override; - void closeDisplay(); + void closeDisplay() override; void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, - unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1); - void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green); + unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) override; + void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) override; void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, - unsigned int thickness = 1); - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1); + unsigned int thickness = 1) override; + void 
displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1); + unsigned int thickness = 1) override; - void displayImage(const vpImage &I); - void displayImage(const vpImage &I); + void displayImage(const vpImage &I) override; + void displayImage(const vpImage &I) override; void displayImage(const unsigned char *I); void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, - unsigned int height); - void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height); + unsigned int height) override; + void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1); + void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override; - void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1); + void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, - bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, - bool fill = false, unsigned int thickness = 1); - void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1); - - void flushDisplay(); - void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height); - - bool getClick(bool blocking = true); - bool getClick(vpImagePoint &ip, bool 
blocking = true); - bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - bool getKeyboardEvent(bool blocking = true); - bool getKeyboardEvent(std::string &key, bool blocking = true); - bool getPointerMotionEvent(vpImagePoint &ip); - bool getPointerPosition(vpImagePoint &ip); + bool fill = false, unsigned int thickness = 1) override; + void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) override; + + void flushDisplay() override; + void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) override; + + bool getClick(bool blocking = true) override; + bool getClick(vpImagePoint &ip, bool blocking = true) override; + bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + bool getKeyboardEvent(bool blocking = true) override; + bool getKeyboardEvent(std::string &key, bool blocking = true) override; + bool getPointerMotionEvent(vpImagePoint &ip) override; + bool getPointerPosition(vpImagePoint &ip) override; private: // Implementation diff --git a/modules/gui/include/visp3/gui/vpDisplayOpenCV.h b/modules/gui/include/visp3/gui/vpDisplayOpenCV.h index 538cf5a176..1fdff217ee 100644 --- a/modules/gui/include/visp3/gui/vpDisplayOpenCV.h +++ b/modules/gui/include/visp3/gui/vpDisplayOpenCV.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Image display. 
- * -*****************************************************************************/ + */ #ifndef _vpDisplayOpenCV_h_ #define _vpDisplayOpenCV_h_ @@ -52,92 +50,90 @@ #include /*! - \file vpDisplayOpenCV.h - \brief Define the OpenCV console to display images. -*/ + * \file vpDisplayOpenCV.h + * \brief Define the OpenCV console to display images. + */ /*! - - \class vpDisplayOpenCV - - \ingroup group_gui_display - - \brief The vpDisplayOpenCV allows to display image using the OpenCV library. - Thus to enable this class OpenCV should be installed. Installation - instructions are provided here https://visp.inria.fr/3rd_opencv. - - \warning Since ViSP 3.3.1 or higher we introduce the alpha channel support for color - transparency. This new feature is only supported yet using vpDisplayOpenCV. See vpColor - header documentation and displayOpenCV.cpp example for usage displaying filled - transparent circles and rectangles. - - The example below shows how to display an image with this video device. - \code -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_OPENCV) - vpImage I; // Grey level image - - // Read an image in PGM P5 format - vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); - - vpDisplayOpenCV d; - - // Initialize the display with the image I. Display and image are - // now link together. 
- d.init(I); - - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My OpenCV display"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(10); - topLeftCorner.set_j(50); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Get non blocking keyboard events - std::cout << "Check keyboard events..." << std::endl; - char key[10]; - bool ret; - for (int i=0; i< 200; i++) { - bool ret = vpDisplay::getKeyboardEvent(I, key, false); - if (ret) - std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; - vpTime::wait(40); - } - - // Get a blocking keyboard event - std::cout << "Wait for a keyboard event..." << std::endl; - ret = vpDisplay::getKeyboardEvent(I, key, true); - std::cout << "keyboard event: " << ret << std::endl; - if (ret) - std::cout << "key: " << "\"" << key << "\"" << std::endl; - - // Wait for a click in the display window - std::cout << "Wait for a button click..." << std::endl; - vpDisplay::getClick(I); -#endif -} - \endcode -*/ - + * \class vpDisplayOpenCV + * + * \ingroup group_gui_display + * + * \brief The vpDisplayOpenCV allows to display image using the OpenCV library. + * Thus to enable this class OpenCV should be installed. Installation + * instructions are provided here https://visp.inria.fr/3rd_opencv. + * + * \warning Since ViSP 3.3.1 or higher we introduce the alpha channel support for color + * transparency. This new feature is only supported yet using vpDisplayOpenCV. 
See vpColor + * header documentation and displayOpenCV.cpp example for usage displaying filled + * transparent circles and rectangles. + * + * The example below shows how to display an image with this video device. + * \code + * #include + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_OPENCV) + * vpImage I; // Grey level image + * + * // Read an image in PGM P5 format + * vpImageIo::read(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); + * + * vpDisplayOpenCV d; + * + * // Initialize the display with the image I. Display and image are + * // now link together. + * d.init(I); + * + * // Specify the window location + * vpDisplay::setWindowPosition(I, 400, 100); + * + * // Set the display window title + * vpDisplay::setTitle(I, "My OpenCV display"); + * + * // Set the display background with image I content + * vpDisplay::display(I); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); + * + * // Draw a red rectangle in the display overlay (foreground) + * vpImagePoint topLeftCorner; + * topLeftCorner.set_i(10); + * topLeftCorner.set_j(50); + * vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); + * + * // Flush the foreground and background display + * vpDisplay::flush(I); + * + * // Get non blocking keyboard events + * std::cout << "Check keyboard events..." << std::endl; + * char key[10]; + * bool ret; + * for (int i=0; i< 200; i++) { + * bool ret = vpDisplay::getKeyboardEvent(I, key, false); + * if (ret) + * std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; + * vpTime::wait(40); + * } + * + * // Get a blocking keyboard event + * std::cout << "Wait for a keyboard event..." 
<< std::endl; + * ret = vpDisplay::getKeyboardEvent(I, key, true); + * std::cout << "keyboard event: " << ret << std::endl; + * if (ret) + * std::cout << "key: " << "\"" << key << "\"" << std::endl; + * + * // Wait for a click in the display window + * std::cout << "Wait for a button click..." << std::endl; + * vpDisplay::getClick(I); + * #endif + * } + * \endcode + */ class VISP_EXPORT vpDisplayOpenCV : public vpDisplay { private: @@ -208,66 +204,66 @@ class VISP_EXPORT vpDisplayOpenCV : public vpDisplay vpDisplayOpenCV(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "", vpScaleType type = SCALE_DEFAULT); - virtual ~vpDisplayOpenCV(); + virtual ~vpDisplayOpenCV() override; - void getImage(vpImage &I); - unsigned int getScreenHeight(); - void getScreenSize(unsigned int &width, unsigned int &height); - unsigned int getScreenWidth(); + void getImage(vpImage &I) override; + unsigned int getScreenHeight() override; + void getScreenSize(unsigned int &width, unsigned int &height) override; + unsigned int getScreenWidth() override; - void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - void init(unsigned int width, unsigned int height, int winx = -1, int winy = -1, const std::string &title = ""); + void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "") override; + void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "") override; + void init(unsigned int width, unsigned int height, int winx = -1, int winy = -1, const std::string &title = "") override; protected: - void setFont(const std::string &font); - void setTitle(const std::string &title); - void setWindowPosition(int winx, int winy); + void setFont(const std::string &font) override; + void setTitle(const std::string &title) override; + void setWindowPosition(int winx, int winy) override; - void clearDisplay(const vpColor 
&color = vpColor::white); + void clearDisplay(const vpColor &color = vpColor::white) override; - void closeDisplay(); + void closeDisplay() override; void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, - unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1); + unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) override; - void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green); + void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) override; void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, - unsigned int thickness = 1); - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1); + unsigned int thickness = 1) override; + void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1); + unsigned int thickness = 1) override; - void displayImage(const vpImage &I); - void displayImage(const vpImage &I); + void displayImage(const vpImage &I) override; + void displayImage(const vpImage &I) override; void displayImage(const unsigned char *I); void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, - unsigned int height); - void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height); + unsigned int height) override; + void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1); - void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 
1); + void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override; + void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, - bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, - bool fill = false, unsigned int thickness = 1); - void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; + void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) override; - void flushDisplay(); - void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height); + void flushDisplay() override; + void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) override; - bool getClick(bool blocking = true); - bool getClick(vpImagePoint &ip, bool blocking = true); - bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); + bool getClick(bool blocking = true) override; + bool getClick(vpImagePoint &ip, bool blocking = true) override; + bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; - bool getKeyboardEvent(bool blocking = true); - bool getKeyboardEvent(std::string &key, bool blocking = true); - bool getPointerMotionEvent(vpImagePoint &ip); - bool getPointerPosition(vpImagePoint &ip); + 
bool getKeyboardEvent(bool blocking = true) override; + bool getKeyboardEvent(std::string &key, bool blocking = true) override; + bool getPointerMotionEvent(vpImagePoint &ip) override; + bool getPointerPosition(vpImagePoint &ip) override; static void on_mouse(int event, int x, int y, int flags, void *param); diff --git a/modules/gui/include/visp3/gui/vpDisplayWin32.h b/modules/gui/include/visp3/gui/vpDisplayWin32.h index 92a1689f71..3d3c31f0fc 100644 --- a/modules/gui/include/visp3/gui/vpDisplayWin32.h +++ b/modules/gui/include/visp3/gui/vpDisplayWin32.h @@ -58,10 +58,11 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - Used to pass parameters to the window's thread. -*/ -struct threadParam { - //! Pointer to the display associated with the window. + * Used to pass parameters to the window's thread. + */ +struct threadParam +{ +//! Pointer to the display associated with the window. vpDisplayWin32 *vpDisp; //! X position of the window. @@ -82,17 +83,15 @@ struct threadParam { #endif /* DOXYGEN_SHOULD_SKIP_THIS */ /*! - \class vpDisplayWin32 - - \brief Base abstract class for Windows 32 displays. - Implements the window creation in a separate thread - and the associated event handling functions for - Windows 32 displays. - Uses calls to a renderer to do some display. - (i.e. all display methods are implemented in the renderer) - - \author Bruno Renier -*/ + * \class vpDisplayWin32 + * + * \brief Base abstract class for Windows 32 displays. + * Implements the window creation in a separate thread + * and the associated event handling functions for + * Windows 32 displays. + * Uses calls to a renderer to do some display. + * (i.e. 
all display methods are implemented in the renderer) + */ class VISP_EXPORT vpDisplayWin32 : public vpDisplay { protected: @@ -124,74 +123,72 @@ class VISP_EXPORT vpDisplayWin32 : public vpDisplay vpDisplayWin32(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - virtual ~vpDisplayWin32(); + virtual ~vpDisplayWin32() override; - void clearDisplay(const vpColor &color = vpColor::white); - void closeDisplay(); - void displayImage(const vpImage &I); - void displayImage(const vpImage &I); + void clearDisplay(const vpColor &color = vpColor::white) override; + void closeDisplay() override; + void displayImage(const vpImage &I) override; + void displayImage(const vpImage &I) override; void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, - unsigned int height); - void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height); + unsigned int height) override; + void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void flushDisplay(); - void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height); + void flushDisplay() override; + void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void getImage(vpImage &I); - unsigned int getScreenHeight(); - void getScreenSize(unsigned int &width, unsigned int &height); - unsigned int getScreenWidth(); + void getImage(vpImage &I) override; + unsigned int getScreenHeight() override; + void getScreenSize(unsigned int &width, unsigned int &height) override; + unsigned int getScreenWidth() override; - void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = ""); - void init(unsigned int width, unsigned int height, int winx = -1, int winy = -1, const std::string &title = ""); + void init(vpImage &I, int winx = -1, 
int winy = -1, const std::string &title = "") override; + void init(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "") override; + void init(unsigned int width, unsigned int height, int winx = -1, int winy = -1, const std::string &title = "") override; - void setFont(const std::string &fontname); + void setFont(const std::string &fontname) override; void setDownScalingFactor(unsigned int scale) { window.setScale(scale); m_scale = scale; } void setDownScalingFactor(vpScaleType scaleType) { m_scaleType = scaleType; } - void setTitle(const std::string &windowtitle); - void setWindowPosition(int winx, int winy); + void setTitle(const std::string &windowtitle) override; + void setWindowPosition(int winx, int winy) override; protected: void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, - unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1); + unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) override; - void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green); + void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) override; void displayCircle(const vpImagePoint &center, unsigned int radius, const vpColor &color, bool fill = false, - unsigned int thickness = 1); + unsigned int thickness = 1) override; - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1); + void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; - void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1); + void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, + unsigned int thickness = 1) override; - void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor
&color, unsigned int thickness = 1); + void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override; - void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1); + void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, - bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, - bool fill = false, unsigned int thickness = 1); - void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1); - - bool getClick(bool blocking = true); - - bool getClick(vpImagePoint &ip, bool blocking = true); - - bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - - bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - bool getKeyboardEvent(bool blocking = true); - bool getKeyboardEvent(std::string &key, bool blocking); - bool getPointerMotionEvent(vpImagePoint &ip); - bool getPointerPosition(vpImagePoint &ip); + bool fill = false, unsigned int thickness = 1) override; + void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) override; + + bool getClick(bool blocking = true) override; + bool getClick(vpImagePoint &ip, bool blocking = true) override; + bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + + bool getKeyboardEvent(bool blocking = true) override; + bool getKeyboardEvent(std::string &key, bool 
blocking) override; + bool getPointerMotionEvent(vpImagePoint &ip) override; + bool getPointerPosition(vpImagePoint &ip) override; void waitForInit(); }; diff --git a/modules/gui/include/visp3/gui/vpDisplayX.h b/modules/gui/include/visp3/gui/vpDisplayX.h index 554a93a550..feaadfc8bb 100644 --- a/modules/gui/include/visp3/gui/vpDisplayX.h +++ b/modules/gui/include/visp3/gui/vpDisplayX.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Image display. - * -*****************************************************************************/ + */ #ifndef vpDisplayX_h #define vpDisplayX_h @@ -156,69 +154,69 @@ class VISP_EXPORT vpDisplayX : public vpDisplay vpDisplayX(vpImage &I, int winx = -1, int winy = -1, const std::string &title = "", vpScaleType type = SCALE_DEFAULT); - virtual ~vpDisplayX(); + virtual ~vpDisplayX() override; - void getImage(vpImage &I); + void getImage(vpImage &I) override; unsigned int getScreenDepth(); - unsigned int getScreenHeight(); - void getScreenSize(unsigned int &width, unsigned int &height); - unsigned int getScreenWidth(); + unsigned int getScreenHeight() override; + void getScreenSize(unsigned int &width, unsigned int &height) override; + unsigned int getScreenWidth() override; - void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = ""); - void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = ""); + void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "") override; + void init(vpImage &I, int win_x = -1, int win_y = -1, const std::string &win_title = "") override; void init(unsigned int win_width, unsigned int win_height, int win_x = -1, int win_y = -1, - const std::string &win_title = ""); + const std::string &win_title = "") override; 
protected: - void clearDisplay(const vpColor &color = vpColor::white); + void clearDisplay(const vpColor &color = vpColor::white) override; - void closeDisplay(); + void closeDisplay() override; void displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color = vpColor::white, - unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1); + unsigned int w = 4, unsigned int h = 2, unsigned int thickness = 1) override; - void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green); + void displayCharString(const vpImagePoint &ip, const char *text, const vpColor &color = vpColor::green) override; void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, - unsigned int thickness = 1); - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1); + unsigned int thickness = 1) override; + void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1); + unsigned int thickness = 1) override; - void displayImage(const vpImage &I); - void displayImage(const vpImage &I); + void displayImage(const vpImage &I) override; + void displayImage(const vpImage &I) override; void displayImage(const unsigned char *I); void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, - unsigned int height); - void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height); + unsigned int height) override; + void displayImageROI(const vpImage &I, const vpImagePoint &iP, unsigned int width, unsigned int height) override; - void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1); - void displayPoint(const vpImagePoint &ip, 
const vpColor &color, unsigned int thickness = 1); + void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override; + void displayPoint(const vpImagePoint &ip, const vpColor &color, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, - bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; void displayRectangle(const vpImagePoint &topLeft, const vpImagePoint &bottomRight, const vpColor &color, - bool fill = false, unsigned int thickness = 1); - void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1); + bool fill = false, unsigned int thickness = 1) override; + void displayRectangle(const vpRect &rectangle, const vpColor &color, bool fill = false, unsigned int thickness = 1) override; - void flushDisplay(); - void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height); + void flushDisplay() override; + void flushDisplayROI(const vpImagePoint &iP, unsigned int width, unsigned int height) override; - bool getClick(bool blocking = true); - bool getClick(vpImagePoint &ip, bool blocking = true); - bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); - bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true); + bool getClick(bool blocking = true) override; + bool getClick(vpImagePoint &ip, bool blocking = true) override; + bool getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; + bool getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType &button, bool blocking = true) override; - bool getKeyboardEvent(bool blocking = true); - bool getKeyboardEvent(std::string &key, bool blocking = true); + bool getKeyboardEvent(bool blocking = true) 
override; + bool getKeyboardEvent(std::string &key, bool blocking = true) override; - bool getPointerMotionEvent(vpImagePoint &ip); - bool getPointerPosition(vpImagePoint &ip); + bool getPointerMotionEvent(vpImagePoint &ip) override; + bool getPointerPosition(vpImagePoint &ip) override; - void setFont(const std::string &font); - void setTitle(const std::string &title); - void setWindowPosition(int win_x, int win_y); + void setFont(const std::string &font) override; + void setTitle(const std::string &title) override; + void setWindowPosition(int win_x, int win_y) override; private: // Implementation diff --git a/modules/gui/include/visp3/gui/vpGDIRenderer.h b/modules/gui/include/visp3/gui/vpGDIRenderer.h index 6b82fdd76e..203cc6d065 100644 --- a/modules/gui/include/visp3/gui/vpGDIRenderer.h +++ b/modules/gui/include/visp3/gui/vpGDIRenderer.h @@ -110,7 +110,7 @@ class VISP_EXPORT vpGDIRenderer : public vpWin32Renderer void drawArrow(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int w, unsigned int h, unsigned int thickness = 1); - void getImage(vpImage &I); + void getImage(vpImage &I) override; private: // updates the renderer hbitmaps. diff --git a/modules/gui/include/visp3/gui/vpPlot.h b/modules/gui/include/visp3/gui/vpPlot.h index ac3e57e1ee..0ec40b738f 100644 --- a/modules/gui/include/visp3/gui/vpPlot.h +++ b/modules/gui/include/visp3/gui/vpPlot.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Plot curves. - * -*****************************************************************************/ + */ /*! \file vpPlot.h @@ -45,70 +43,68 @@ #include #include -/*! - \class vpPlot - \ingroup group_gui_plotter - - \brief This class enables real time drawing of 2D or 3D graphics. 
An - instance of the class open a window which contains between 1 and 4 - graphics. Each one contains a desired number of curves. - - \warning This class is only available if one of the display functionalities - (X11, GDI, GTK, OpenCV or Direct3D) is available. In visp3/core/vpConfig.h - header file, you should have VISP_HAVE_DISPLAY define. - - The example below shows how to use the vpPlot class. An other example - provided in tutorial-ibvs-plotter.cpp and described in \ref tutorial-plotter - shows how to use this class to plot in real-time some curves during an - image-based visual servo. - - \code -#include - -int main () -{ #if defined(VISP_HAVE_DISPLAY) - // Create a window (700 by 700) at position (100, 200) with two graphics - vpPlot A(2, 700, 700, 100, 200, "Curves..."); - - // The first graphic contains 1 curve and the second graphic contains 2 curves - A.initGraph(0,1); A.initGraph(1,2); - - // The color of the curve in the first graphic is red - A.setColor(0,0,vpColor::red); - // The first curve in the second graphic is green - A.setColor(1,0,vpColor::green); - // The second curve in the second graphic is blue - A.setColor(1,1,vpColor::blue); - - // Add the point (0,0) in the first graphic - A.plot(0,0,0,0); - - // Add the point (0,1) to the first curve of the second graphic - A.plot(1,0,0,1); - - // Add the point (0,2) to the second curve of the second graphic - A.plot(1,1,0,2); - - for (int i = 0; i < 50; i++) { - // Add the point (i,sin(i*pi/10) in the first graphic - A.plot(0,0,i,sin(i*M_PI/10)); - - // Add the point (i,1) to the first curve of the second graphic - A.plot(1,0,i,1); - - // Add the point (i,2) to the second curve of the second graphic - A.plot(1,1,i,2); - } - - return 0; -#endif -} - \endcode -*/ - -#if defined(VISP_HAVE_DISPLAY) - +/*! + * \class vpPlot + * \ingroup group_gui_plotter + * + * \brief This class enables real time drawing of 2D or 3D graphics. 
An + * instance of the class open a window which contains between 1 and 4 + * graphics. Each one contains a desired number of curves. + * + * \warning This class is only available if one of the display functionalities + * (X11, GDI, GTK, OpenCV or Direct3D) is available. In visp3/core/vpConfig.h + * header file, you should have VISP_HAVE_DISPLAY define. + * + * The example below shows how to use the vpPlot class. An other example + * provided in tutorial-ibvs-plotter.cpp and described in \ref tutorial-plotter + * shows how to use this class to plot in real-time some curves during an + * image-based visual servo. + * + * \code + * #include + * + * int main () + * { + * #if defined(VISP_HAVE_DISPLAY) + * // Create a window (700 by 700) at position (100, 200) with two graphics + * vpPlot A(2, 700, 700, 100, 200, "Curves..."); + * + * // The first graphic contains 1 curve and the second graphic contains 2 curves + * A.initGraph(0,1); A.initGraph(1,2); + * + * // The color of the curve in the first graphic is red + * A.setColor(0,0,vpColor::red); + * // The first curve in the second graphic is green + * A.setColor(1,0,vpColor::green); + * // The second curve in the second graphic is blue + * A.setColor(1,1,vpColor::blue); + * + * // Add the point (0,0) in the first graphic + * A.plot(0,0,0,0); + * + * // Add the point (0,1) to the first curve of the second graphic + * A.plot(1,0,0,1); + * + * // Add the point (0,2) to the second curve of the second graphic + * A.plot(1,1,0,2); + * + * for (int i = 0; i < 50; i++) { + * // Add the point (i,sin(i*pi/10) in the first graphic + * A.plot(0,0,i,sin(i*M_PI/10)); + * + * // Add the point (i,1) to the first curve of the second graphic + * A.plot(1,0,i,1); + * + * // Add the point (i,2) to the second curve of the second graphic + * A.plot(1,1,i,2); + * } + * + * return 0; + $ #endif + * } + * \endcode + */ class VISP_EXPORT vpPlot { public: @@ -173,12 +169,13 @@ class VISP_EXPORT vpPlot void setColor(unsigned int graphNum, 
unsigned int curveNum, vpColor color); void setGraphThickness(unsigned int graphNum, unsigned int thickness); void setGridThickness(unsigned int graphNum, unsigned int thickness); - /*! - Set the font of the characters. The display should be initialized before. - To know which font are available, on Unix you can use xfontsel or xlsfonts - utilities. - */ + /*! + * Set the font of the characters. The display should be initialized before. + * + * To know which font are available, on Unix you can use xfontsel or xlsfonts + * utilities. + */ void setFont(const std::string &font) { if (display->isInitialised()) diff --git a/modules/gui/src/display/vpDisplayGTK.cpp b/modules/gui/src/display/vpDisplayGTK.cpp index a920903096..e9a849c8d7 100644 --- a/modules/gui/src/display/vpDisplayGTK.cpp +++ b/modules/gui/src/display/vpDisplayGTK.cpp @@ -73,13 +73,12 @@ class vpDisplayGTK::Impl public: Impl() : m_widget(NULL), m_background(NULL), m_gc(NULL), m_blue(), m_red(), m_yellow(), m_green(), m_cyan(), m_orange(), - m_white(), m_black(), m_gdkcolor(), m_lightBlue(), m_darkBlue(), m_lightRed(), m_darkRed(), m_lightGreen(), - m_darkGreen(), m_purple(), m_lightGray(), m_gray(), m_darkGray(), m_colormap(NULL), m_font(NULL), m_vectgtk(NULL), - m_col(NULL) - { - } + m_white(), m_black(), m_gdkcolor(), m_lightBlue(), m_darkBlue(), m_lightRed(), m_darkRed(), m_lightGreen(), + m_darkGreen(), m_purple(), m_lightGray(), m_gray(), m_darkGray(), m_colormap(NULL), m_font(NULL), m_vectgtk(NULL), + m_col(NULL) + { } - ~Impl() {} + ~Impl() { } void init(unsigned int win_width, unsigned int win_height, int win_x, int win_y, const std::string &title) { @@ -211,7 +210,8 @@ class vpDisplayGTK::Impl if (scale == 1) { /* Copie de l'image dans le pixmap fond */ gdk_draw_gray_image(m_background, m_gc, 0, 0, width, height, GDK_RGB_DITHER_NONE, I.bitmap, width); - } else { + } + else { vpImage sampled; I.subsample(scale, scale, sampled); gdk_draw_gray_image(m_background, m_gc, 0, 0, width, height, 
GDK_RGB_DITHER_NONE, sampled.bitmap, width); @@ -227,7 +227,8 @@ class vpDisplayGTK::Impl /* Copie de l'image dans le pixmap fond */ gdk_draw_rgb_32_image(m_background, m_gc, 0, 0, width, height, GDK_RGB_DITHER_NONE, (unsigned char *)I.bitmap, 4 * width); - } else { + } + else { vpImage sampled; I.subsample(scale, scale, sampled); gdk_draw_rgb_32_image(m_background, m_gc, 0, 0, width, height, GDK_RGB_DITHER_NONE, @@ -369,7 +370,8 @@ class vpDisplayGTK::Impl if (thickness == 1) { gdk_draw_point(m_background, m_gc, vpMath::round(ip.get_u() / scale), vpMath::round(ip.get_v() / scale)); - } else { + } + else { gdk_draw_rectangle(m_background, m_gc, TRUE, vpMath::round(ip.get_u() / scale), vpMath::round(ip.get_v() / scale), static_cast(thickness), static_cast(thickness)); } @@ -531,7 +533,8 @@ class vpDisplayGTK::Impl w = static_cast(gdk_screen_get_width(screen_)); h = static_cast(gdk_screen_get_height(screen_)); gtk_widget_destroy(widget_); - } else { + } + else { GdkScreen *screen_ = gdk_window_get_screen(m_widget->window); w = static_cast(gdk_screen_get_width(screen_)); h = static_cast(gdk_screen_get_height(screen_)); @@ -714,7 +717,7 @@ int main() } \endcode */ -vpDisplayGTK::vpDisplayGTK() : vpDisplay(), m_impl(new Impl()) {} +vpDisplayGTK::vpDisplayGTK() : vpDisplay(), m_impl(new Impl()) { } /*! Destructor. 
@@ -837,7 +840,8 @@ void vpDisplayGTK::setTitle(const std::string &title) if (!title.empty()) { m_impl->setTitle(title); } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -855,7 +859,8 @@ void vpDisplayGTK::setWindowPosition(int win_x, int win_y) { if (m_displayHasBeenInitialized) { m_impl->setWindowPosition(win_x, win_y); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -875,7 +880,8 @@ void vpDisplayGTK::displayImage(const vpImage &I) { if (m_displayHasBeenInitialized) { m_impl->displayImage(I, m_scale, static_cast(m_width), static_cast(m_height)); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -910,7 +916,8 @@ void vpDisplayGTK::displayImageROI(const vpImage &I, const vpImag m_impl->displayImageROI(Itemp, static_cast(j_min), static_cast(i_min), static_cast(Itemp.getWidth()), static_cast(Itemp.getHeight())); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -930,7 +937,8 @@ void vpDisplayGTK::displayImage(const vpImage &I) { if (m_displayHasBeenInitialized) { m_impl->displayImage(I, m_scale, static_cast(m_width), static_cast(m_height)); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -964,7 +972,8 @@ void vpDisplayGTK::displayImageROI(const vpImage &I, const vpImagePoint m_impl->displayImageROI(Itemp, static_cast(j_min), static_cast(i_min), static_cast(Itemp.getWidth()), static_cast(Itemp.getHeight())); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -998,7 +1007,8 @@ void vpDisplayGTK::flushDisplay() { if (m_displayHasBeenInitialized) { m_impl->flushDisplay(); - } else { + } + else { 
throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1012,7 +1022,8 @@ void vpDisplayGTK::flushDisplayROI(const vpImagePoint & /*iP*/, const unsigned i { if (m_displayHasBeenInitialized) { m_impl->flushDisplay(); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1061,7 +1072,8 @@ void vpDisplayGTK::displayArrow(const vpImagePoint &ip1, const vpImagePoint &ip2 displayLine(ip1, ip2, color, thickness); } - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1081,7 +1093,8 @@ void vpDisplayGTK::displayCharString(const vpImagePoint &ip, const char *text, c { if (m_displayHasBeenInitialized) { m_impl->displayCharString(ip, text, color, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1102,7 +1115,8 @@ void vpDisplayGTK::displayCircle(const vpImagePoint ¢er, unsigned int radius thickness = 0; m_impl->displayCircle(center, radius, color, fill, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1153,7 +1167,8 @@ void vpDisplayGTK::displayDotLine(const vpImagePoint &ip1, const vpImagePoint &i thickness = 0; m_impl->displayDotLine(ip1, ip2, color, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1172,7 +1187,8 @@ void vpDisplayGTK::displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, thickness = 0; m_impl->displayLine(ip1, ip2, color, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1187,7 +1203,8 @@ void vpDisplayGTK::displayPoint(const vpImagePoint &ip, const vpColor &color, un { if (m_displayHasBeenInitialized) { 
m_impl->displayPoint(ip, color, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1213,7 +1230,8 @@ void vpDisplayGTK::displayRectangle(const vpImagePoint &topLeft, unsigned int w, thickness = 0; m_impl->displayRectangle(topLeft, w, h, color, fill, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1241,7 +1259,8 @@ void vpDisplayGTK::displayRectangle(const vpImagePoint &topLeft, const vpImagePo unsigned int h = static_cast(vpMath::round(bottomRight.get_v() - topLeft.get_v())); m_impl->displayRectangle(topLeft, w, h, color, fill, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1268,7 +1287,8 @@ void vpDisplayGTK::displayRectangle(const vpRect &rectangle, const vpColor &colo unsigned int w = static_cast(vpMath::round(rectangle.getWidth())); unsigned int h = static_cast(vpMath::round(rectangle.getRight())); m_impl->displayRectangle(topLeft, w, h, color, fill, thickness, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1296,7 +1316,8 @@ bool vpDisplayGTK::getClick(bool blocking) vpImagePoint ip; vpMouseButton::vpMouseButtonType button; ret = m_impl->getClick(ip, button, blocking, m_scale, GDK_BUTTON_PRESS); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1325,7 +1346,8 @@ bool vpDisplayGTK::getClick(vpImagePoint &ip, bool blocking) if (m_displayHasBeenInitialized) { vpMouseButton::vpMouseButtonType button; ret = m_impl->getClick(ip, button, blocking, m_scale, GDK_BUTTON_PRESS); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1356,7 +1378,8 
@@ bool vpDisplayGTK::getClick(vpImagePoint &ip, vpMouseButton::vpMouseButtonType & if (m_displayHasBeenInitialized) { ret = m_impl->getClick(ip, button, blocking, m_scale, GDK_BUTTON_PRESS); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1391,7 +1414,8 @@ bool vpDisplayGTK::getClickUp(vpImagePoint &ip, vpMouseButton::vpMouseButtonType if (m_displayHasBeenInitialized) { ret = m_impl->getClick(ip, button, blocking, m_scale, GDK_BUTTON_RELEASE); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1406,7 +1430,8 @@ void vpDisplayGTK::getImage(vpImage &I) // should certainly be optimized. if (m_displayHasBeenInitialized) { m_impl->getImage(I, static_cast(m_width), static_cast(m_height)); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } } @@ -1445,7 +1470,8 @@ bool vpDisplayGTK::getKeyboardEvent(bool blocking) if (m_displayHasBeenInitialized) { std::string key; ret = m_impl->getKeyboardEvent(key, blocking); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1476,7 +1502,8 @@ bool vpDisplayGTK::getKeyboardEvent(std::string &key, bool blocking) if (m_displayHasBeenInitialized) { ret = m_impl->getKeyboardEvent(key, blocking); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1500,7 +1527,8 @@ bool vpDisplayGTK::getPointerMotionEvent(vpImagePoint &ip) if (m_displayHasBeenInitialized) { ret = m_impl->getPointerMotionEvent(ip, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } return ret; @@ -1520,7 +1548,8 @@ bool vpDisplayGTK::getPointerPosition(vpImagePoint &ip) { if (m_displayHasBeenInitialized) { 
m_impl->getPointerPosition(ip, m_scale); - } else { + } + else { throw(vpDisplayException(vpDisplayException::notInitializedError, "GTK not initialized")); } @@ -1561,5 +1590,5 @@ unsigned int vpDisplayGTK::getScreenHeight() #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpDisplayGTK.cpp.o) has no // symbols -void dummy_vpDisplayGTK(){}; +void dummy_vpDisplayGTK() { }; #endif diff --git a/modules/gui/src/display/windows/vpDisplayD3D.cpp b/modules/gui/src/display/windows/vpDisplayD3D.cpp index ce84122dc5..a00ac23d59 100644 --- a/modules/gui/src/display/windows/vpDisplayD3D.cpp +++ b/modules/gui/src/display/windows/vpDisplayD3D.cpp @@ -50,7 +50,7 @@ /*! \brief Basic constructor. */ -vpDisplayD3D::vpDisplayD3D() : vpDisplayWin32(new vpD3DRenderer()) {} +vpDisplayD3D::vpDisplayD3D() : vpDisplayWin32(new vpD3DRenderer()) { } /*! @@ -188,13 +188,8 @@ vpDisplayD3D::vpDisplayD3D(vpImage &I, int winx, int winy, const init(I, winx, winy, title); } -/*! - \brief Basic destructor. -*/ -vpDisplayD3D::~vpDisplayD3D() {} - #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpDisplayD3D.cpp.o) has no // symbols -void dummy_vpDisplayD3D(){}; +void dummy_vpDisplayD3D() { }; #endif diff --git a/modules/gui/src/display/windows/vpDisplayGDI.cpp b/modules/gui/src/display/windows/vpDisplayGDI.cpp index 5c1a6a45d4..5140f60cbc 100644 --- a/modules/gui/src/display/windows/vpDisplayGDI.cpp +++ b/modules/gui/src/display/windows/vpDisplayGDI.cpp @@ -52,7 +52,7 @@ /*! \brief Basic constructor. */ -vpDisplayGDI::vpDisplayGDI() : vpDisplayWin32(new vpGDIRenderer()) {} +vpDisplayGDI::vpDisplayGDI() : vpDisplayWin32(new vpGDIRenderer()) { } /*! @@ -188,13 +188,8 @@ vpDisplayGDI::vpDisplayGDI(vpImage &I, int winx, int winy, const init(I, winx, winy, title); } -/*! - \brief Basic destructor. 
-*/ -vpDisplayGDI::~vpDisplayGDI() {} - #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpDisplayGDI.cpp.o) has no // symbols -void dummy_vpDisplayGDI(){}; +void dummy_vpDisplayGDI() { }; #endif diff --git a/modules/imgproc/src/vpCircleHoughTransform.cpp b/modules/imgproc/src/vpCircleHoughTransform.cpp index dadfff341e..c39f9aacb6 100644 --- a/modules/imgproc/src/vpCircleHoughTransform.cpp +++ b/modules/imgproc/src/vpCircleHoughTransform.cpp @@ -483,7 +483,7 @@ vpCircleHoughTransform::computeCircleCandidates() float scalProd = rx * gx + ry * gy; float scalProd2 = scalProd * scalProd; if (scalProd2 >= circlePerfectness2 * r2 * grad2) { - // Look for the Radius Candidate Bin RCB_k to which d_ij is "the closest" will have an additionnal vote + // Look for the Radius Candidate Bin RCB_k to which d_ij is "the closest" will have an additional vote float r = static_cast(std::sqrt(r2)); unsigned int r_bin = static_cast(std::ceil((r - m_algoParams.m_minRadius)/ m_algoParams.m_centerMinDist)); r_bin = std::min(r_bin, nbBins - 1); diff --git a/modules/io/include/visp3/io/vpDiskGrabber.h b/modules/io/include/visp3/io/vpDiskGrabber.h index 1548e92a5f..221dc48806 100644 --- a/modules/io/include/visp3/io/vpDiskGrabber.h +++ b/modules/io/include/visp3/io/vpDiskGrabber.h @@ -32,9 +32,9 @@ */ /*! - \file vpDiskGrabber.h - \brief Class to load image sequence from the disk. -*/ + * \file vpDiskGrabber.h + * \brief Class to load image sequence from the disk. + */ #ifndef vpDiskGrabber_hh #define vpDiskGrabber_hh @@ -46,60 +46,60 @@ #include /*! - \class vpDiskGrabber - - \ingroup group_io_video - - \brief Class to grab (ie. read) images from the disk. - - Defined a virtual video device. "Grab" the images from the disk. - Derived from the vpFrameGrabber class. - - \sa vpFrameGrabber - - Here an example of capture from the directory - "/local/soft/ViSP/ViSP-images/cube". 
We want to acquire 10 images - from the first named "image.0001.pgm" by steps of 2. - - \code -#include -#include - -int main(){ - vpImage I; // Grey level image - - // Declare a framegrabber able to read a sequence of successive - // images from the disk - vpDiskGrabber g; - - // Set the path to the directory containing the sequence - g.setDirectory("/local/soft/ViSP/ViSP-images/cube"); - // Set the image base name. The directory and the base name constitute - // the constant part of the full filename - g.setBaseName("image."); - // Set the step between two images of the sequence - g.setStep(2); - // Set the number of digits to build the image number - g.setNumberOfZero(4); - // Set the first frame number of the sequence - g.setImageNumber(1); - // Set the image file extension - g.setExtension("pgm"); - - // Open the framegrabber by loading the first image of the sequence - g.open(I) ; - - unsigned int cpt = 1; - // this is the loop over the image sequence - while(cpt ++ < 10) - { - // read the image and then increment the image counter so that the next - // call to acquire(I) will get the next image - g.acquire(I) ; - } -} - \endcode -*/ + * \class vpDiskGrabber + * + * \ingroup group_io_video + * + * \brief Class to grab (ie. read) images from the disk. + * + * Defined a virtual video device. "Grab" the images from the disk. + * Derived from the vpFrameGrabber class. + * + * \sa vpFrameGrabber + * + * Here an example of capture from the directory + * "/local/soft/ViSP/ViSP-images/cube". We want to acquire 10 images + * from the first named "image.0001.pgm" by steps of 2. + * + * \code + * #include + * #include + * + * int main(){ + * vpImage I; // Grey level image + * + * // Declare a framegrabber able to read a sequence of successive + * // images from the disk + * vpDiskGrabber g; + * + * // Set the path to the directory containing the sequence + * g.setDirectory("/local/soft/ViSP/ViSP-images/cube"); + * // Set the image base name. 
The directory and the base name constitute + * // the constant part of the full filename + * g.setBaseName("image."); + * // Set the step between two images of the sequence + * g.setStep(2); + * // Set the number of digits to build the image number + * g.setNumberOfZero(4); + * // Set the first frame number of the sequence + * g.setImageNumber(1); + * // Set the image file extension + * g.setExtension("pgm"); + * + * // Open the framegrabber by loading the first image of the sequence + * g.open(I) ; + * + * unsigned int cpt = 1; + * // this is the loop over the image sequence + * while(cpt ++ < 10) + * { + * // read the image and then increment the image counter so that the next + * // call to acquire(I) will get the next image + * g.acquire(I) ; + * } + * } + * \endcode + */ class VISP_EXPORT vpDiskGrabber : public vpFrameGrabber { private: @@ -128,6 +128,12 @@ class VISP_EXPORT vpDiskGrabber : public vpFrameGrabber */ explicit vpDiskGrabber(const std::string &genericName); + /*! + * Destructor. + * In fact nothing to destroy... + */ + virtual ~vpDiskGrabber() { }; + /*! * Constructor. * @@ -141,12 +147,6 @@ class VISP_EXPORT vpDiskGrabber : public vpFrameGrabber explicit vpDiskGrabber(const std::string &dir, const std::string &basename, long number, int step, unsigned int noz, const std::string &ext); - /*! - * Destructor. - * In fact nothing to destroy... - */ - virtual ~vpDiskGrabber(); - /*! * Acquire an image reading the next image from the disk. * After this call, the image number is incremented considering the step. diff --git a/modules/io/include/visp3/io/vpVideoReader.h b/modules/io/include/visp3/io/vpVideoReader.h index ba4a294df0..a6ea82161e 100644 --- a/modules/io/include/visp3/io/vpVideoReader.h +++ b/modules/io/include/visp3/io/vpVideoReader.h @@ -49,117 +49,117 @@ #endif /*! - \class vpVideoReader - - \ingroup group_io_video - - \brief Class that enables to manipulate easily a video file or a sequence of - images. 
As it inherits from the vpFrameGrabber Class, it can be used like an - other frame grabber class. - - This class has its own implementation to read a sequence of PGM and PPM - images. - - This class may benefit from optional 3rd parties: - - libpng: If installed this optional 3rd party is used to read a sequence of - PNG images. Installation instructions are provided here - https://visp.inria.fr/3rd_png. - - libjpeg: If installed this optional 3rd party is used to read a sequence - of JPEG images. Installation instructions are provided here - https://visp.inria.fr/3rd_jpeg. - - OpenCV: If installed this optional 3rd party is used to read a sequence of - images where images could be in TIFF, BMP, DIB, PBM, RASTER, JPEG2000 format. - If libpng or libjpeg is not installed, OpenCV is also used to consider these - image formats. OpenCV allows also to consider AVI, MPEG, MPEG4, MOV, OGV, WMV, - FLV, MKV video formats. Installation instructions are provided here - https://visp.inria.fr/3rd_opencv. - - The following example available in tutorial-video-reader.cpp shows how this - class is really easy to use. It enables to read a video file named - video.mpeg. - \include tutorial-video-reader.cpp - - As shown in the next example, this class allows also to access to a specific - frame. But be careful, for video files, the getFrame() method is not precise - and returns the nearest intra key frame from the expected frame. You can use - the getFrame() method to position the reader in the video and then use the - acquire() method to get the following frames one by one. - \code -#include - -int main() -{ -#ifdef VISP_HAVE_OPENCV - vpImage I; - - vpVideoReader reader; - - // Initialize the reader. 
- reader.setFileName("video.mpeg"); - reader.open(I); - - // Read the nearest key frame from the 3th frame - reader.getFrame(I, 2); - - // After positioning the video reader use acquire to read the video frame by frame - reader.acquire(I); - - return 0; -#endif -} - \endcode - - The other following example explains how to use the class to read a - sequence of images. The images are stored in the folder "./image" and are - named "image0000.jpeg", "image0001.jpeg", "image0002.jpeg", ... As explained - in setFirstFrameIndex() and setLastFrameIndex() it is also possible to set - the first and last image numbers to read a portion of the sequence. If these - two functions are not used, first and last image numbers are set automatically - to match the first and image images of the sequence. - - \code -#include - -int main() -{ - vpImage I; - - vpVideoReader reader; - - // Initialize the reader. - reader.setFileName("./image/image%04d.jpeg"); - reader.setFirstFrameIndex(10); - reader.setLastFrameIndex(20); - reader.open(I); - - while (! reader.end() ) - reader.acquire(I); - - return 0; -} - \endcode - - Note that it is also possible to access to a specific frame using getFrame(). -\code -#include - -int main() -{ - vpImage I; - - vpVideoReader reader; - - // Initialize the reader. - reader.setFileName("./image/image%04d.jpeg"); - reader.open(I); - - // Read the 3th frame - reader.getFrame(I,2); - - return 0; -} - \endcode -*/ + * \class vpVideoReader + * + * \ingroup group_io_video + * + * \brief Class that enables to manipulate easily a video file or a sequence of + * images. As it inherits from the vpFrameGrabber Class, it can be used like an + * other frame grabber class. + * + * This class has its own implementation to read a sequence of PGM and PPM + * images. + * + * This class may benefit from optional 3rd parties: + * - libpng: If installed this optional 3rd party is used to read a sequence of + * PNG images. 
Installation instructions are provided here + * https://visp.inria.fr/3rd_png. + * - libjpeg: If installed this optional 3rd party is used to read a sequence + * of JPEG images. Installation instructions are provided here + * https://visp.inria.fr/3rd_jpeg. + * - OpenCV: If installed this optional 3rd party is used to read a sequence of + * images where images could be in TIFF, BMP, DIB, PBM, RASTER, JPEG2000 format. + * If libpng or libjpeg is not installed, OpenCV is also used to consider these + * image formats. OpenCV allows also to consider AVI, MPEG, MPEG4, MOV, OGV, WMV, + * FLV, MKV video formats. Installation instructions are provided here + * https://visp.inria.fr/3rd_opencv. + * + * The following example available in tutorial-video-reader.cpp shows how this + * class is really easy to use. It enables to read a video file named + * video.mpeg. + * \include tutorial-video-reader.cpp + * + * As shown in the next example, this class allows also to access to a specific + * frame. But be careful, for video files, the getFrame() method is not precise + * and returns the nearest intra key frame from the expected frame. You can use + * the getFrame() method to position the reader in the video and then use the + * acquire() method to get the following frames one by one. + * \code + * #include + * + * int main() + * { + * #ifdef VISP_HAVE_OPENCV + * vpImage I; + * + * vpVideoReader reader; + * + * // Initialize the reader. + * reader.setFileName("video.mpeg"); + * reader.open(I); + * + * // Read the nearest key frame from the 3th frame + * reader.getFrame(I, 2); + * + * // After positioning the video reader use acquire to read the video frame by frame + * reader.acquire(I); + * + * return 0; + * #endif + * } + * \endcode + * + * The other following example explains how to use the class to read a + * sequence of images. The images are stored in the folder "./image" and are + * named "image0000.jpeg", "image0001.jpeg", "image0002.jpeg", ... 
As explained + * in setFirstFrameIndex() and setLastFrameIndex() it is also possible to set + * the first and last image numbers to read a portion of the sequence. If these + * two functions are not used, first and last image numbers are set automatically + * to match the first and image images of the sequence. + * + * \code + * #include + * + * int main() + * { + * vpImage I; + * + * vpVideoReader reader; + * + * // Initialize the reader. + * reader.setFileName("./image/image%04d.jpeg"); + * reader.setFirstFrameIndex(10); + * reader.setLastFrameIndex(20); + * reader.open(I); + * + * while (! reader.end() ) + * reader.acquire(I); + * + * return 0; + * } + * \endcode + * + * Note that it is also possible to access to a specific frame using getFrame(). + * \code + * #include + * + * int main() + * { + * vpImage I; + * + * vpVideoReader reader; + * + * // Initialize the reader. + * reader.setFileName("./image/image%04d.jpeg"); + * reader.open(I); + * + * // Read the 3th frame + * reader.getFrame(I,2); + * + * return 0; + * } + * \endcode + */ class VISP_EXPORT vpVideoReader : public vpFrameGrabber { @@ -173,7 +173,8 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber bool m_lastframe_unknown; #endif //! Types of available formats - typedef enum { + typedef enum + { FORMAT_PGM, FORMAT_PPM, FORMAT_JPEG, @@ -229,14 +230,15 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber void close() { ; } /*! - \return true if the end of the sequence is reached. - */ + * \return true if the end of the sequence is reached. + */ inline bool end() { if (m_frameStep > 0) { if (m_frameCount + m_frameStep > m_lastFrame) return true; - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep < m_firstFrame) return true; } @@ -244,11 +246,12 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } bool getFrame(vpImage &I, long frame); bool getFrame(vpImage &I, long frame); - /*! 
- Return the frame rate in Hz used to encode the video stream. - If the video is a sequence of images, return -1. - */ + /*! + * Return the frame rate in Hz used to encode the video stream. + * + * If the video is a sequence of images, return -1. + */ double getFramerate() { if (!m_isOpen) { @@ -258,26 +261,26 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } /*! - Get the frame index of the current image. This index is updated at each - call of the acquire method. It can be used to detect the end of a file - (comparison with getLastFrameIndex()). - - \return Returns the current frame index. - - \sa end() - */ + * Get the frame index of the current image. This index is updated at each + * call of the acquire method. It can be used to detect the end of a file + * (comparison with getLastFrameIndex()). + * + * \return Returns the current frame index. + * + * \sa end() + */ inline long getFrameIndex() const { return m_frameCount; } /*! - Return the name of the file in which the last frame was read. - */ + * Return the name of the file in which the last frame was read. + */ inline std::string getFrameName() const { return m_frameName; } /*! - Gets the first frame index. - - \return Returns the first frame index. - */ + * Gets the first frame index. + * + * \return Returns the first frame index. + */ inline long getFirstFrameIndex() { if (!m_isOpen) { @@ -285,11 +288,12 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } return m_firstFrame; } - /*! - Gets the last frame index. - \return Returns the last frame index. - */ + /*! + * Gets the last frame index. + * + * \return Returns the last frame index. + */ inline long getLastFrameIndex() { if (!m_isOpen) { @@ -297,11 +301,12 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } return m_lastFrame; } - /*! - Gets the frame step. - \return Returns the frame step value. - */ + /*! + * Gets the frame step. + * + * \return Returns the frame step value. 
+ */ inline long getFrameStep() const { return m_frameStep; } bool isVideoFormat() const; @@ -312,35 +317,37 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber vpVideoReader &operator>>(vpImage &I); /*! - Reset the frame counter and sets it to the first image index. - - By default the first frame index is set to 0. - - This method is useful if you use the class like a frame grabber (ie with - the acquire method). - */ + * Reset the frame counter and sets it to the first image index. + * + * By default the first frame index is set to 0. + * + * This method is useful if you use the class like a frame grabber (ie with + * the acquire method). + */ inline void resetFrameCounter() { m_frameCount = m_firstFrame; } void setFileName(const std::string &filename); - /*! - Enables to set the first frame index if you want to use the class like a - grabber (ie with the acquire method). - - \param first_frame : The first frame index. - \sa setLastFrameIndex() - */ + /*! + * Enables to set the first frame index if you want to use the class like a + * grabber (ie with the acquire method). + * + * \param first_frame : The first frame index. + * + * \sa setLastFrameIndex() + */ inline void setFirstFrameIndex(const long first_frame) { m_firstFrameIndexIsSet = true; m_firstFrame = first_frame; } - /*! - Enables to set the last frame index. - \param last_frame : The last frame index. - - \sa setFirstFrameIndex() - */ + /*! + * Enables to set the last frame index. + * + * \param last_frame : The last frame index. + * + * \sa setFirstFrameIndex() + */ inline void setLastFrameIndex(const long last_frame) { this->m_lastFrameIndexIsSet = true; @@ -348,13 +355,13 @@ class VISP_EXPORT vpVideoReader : public vpFrameGrabber } /*! - Sets the frame step index. - The default frame step is 1 - - \param frame_step : The frame index step. - - \sa setFrameStep() - */ + * Sets the frame step index. + * The default frame step is 1 + * + * \param frame_step : The frame index step. 
+ * + * \sa setFrameStep() + */ inline void setFrameStep(const long frame_step) { m_frameStep = frame_step; } private: diff --git a/modules/io/src/video/vpDiskGrabber.cpp b/modules/io/src/video/vpDiskGrabber.cpp index 474809e416..f8c923d542 100644 --- a/modules/io/src/video/vpDiskGrabber.cpp +++ b/modules/io/src/video/vpDiskGrabber.cpp @@ -236,8 +236,6 @@ void vpDiskGrabber::acquire(vpImage &I, long image_number) height = I.getHeight(); } -vpDiskGrabber::~vpDiskGrabber() { } - void vpDiskGrabber::setImageNumber(long number) { m_image_number = number; diff --git a/modules/io/src/video/vpVideoReader.cpp b/modules/io/src/video/vpVideoReader.cpp index b687c48204..134db3a30a 100644 --- a/modules/io/src/video/vpVideoReader.cpp +++ b/modules/io/src/video/vpVideoReader.cpp @@ -32,9 +32,9 @@ */ /*! -\file vpVideoReader.cpp -\brief Read videos and image sequences -*/ + * \file vpVideoReader.cpp + * \brief Read videos and image sequences + */ #include #include @@ -51,13 +51,12 @@ vpVideoReader::vpVideoReader() : vpFrameGrabber(), m_imSequence(NULL), #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) - m_capture(), m_frame(), m_lastframe_unknown(false), + m_capture(), m_frame(), m_lastframe_unknown(false), #endif - m_formatType(FORMAT_UNKNOWN), m_videoName(), m_frameName(), m_initFileName(false), m_isOpen(false), m_frameCount(0), - m_firstFrame(0), m_lastFrame(0), m_firstFrameIndexIsSet(false), m_lastFrameIndexIsSet(false), m_frameStep(1), - m_frameRate(0.) -{ -} + m_formatType(FORMAT_UNKNOWN), m_videoName(), m_frameName(), m_initFileName(false), m_isOpen(false), m_frameCount(0), + m_firstFrame(0), m_lastFrame(0), m_firstFrameIndexIsSet(false), m_lastFrameIndexIsSet(false), m_frameStep(1), + m_frameRate(0.) +{ } /*! Basic destructor. 
@@ -131,7 +130,8 @@ void vpVideoReader::getProperties() m_imSequence->setImageNumber(m_firstFrame); } m_frameRate = -1.; - } else if (isVideoExtensionSupported()) { + } + else if (isVideoExtensionSupported()) { #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) m_capture.open(m_videoName.c_str()); @@ -150,14 +150,15 @@ void vpVideoReader::getProperties() #else throw(vpException(vpException::fatalError, "To read video files ViSP should be build with opencv " - "3rd >= 2.1.0 party libraries.")); + "3rd >= 2.1.0 party libraries.")); #endif - } else if (m_formatType == FORMAT_UNKNOWN) { - // vpERROR_TRACE("The format of the file does not correspond to a readable - // format."); + } + else if (m_formatType == FORMAT_UNKNOWN) { + // vpERROR_TRACE("The format of the file does not correspond to a readable + // format."); throw(vpException(vpException::fatalError, "The format of the file does " - "not correspond to a readable " - "format supported by ViSP.")); + "not correspond to a readable " + "format supported by ViSP.")); } findFirstFrameIndex(); @@ -253,7 +254,8 @@ void vpVideoReader::acquire(vpImage &I) try { m_imSequence->acquire(I); skip_frame = false; - } catch (...) { + } + catch (...) 
{ skip_frame = true; } } while (skip_frame && m_imSequence->getImageNumber() < m_lastFrame); @@ -261,7 +263,8 @@ void vpVideoReader::acquire(vpImage &I) m_frameName = m_imSequence->getImageName(); if (m_frameCount + m_frameStep > m_lastFrame) { m_imSequence->setImageNumber(m_frameCount); - } else if (m_frameCount + m_frameStep < m_firstFrame) { + } + else if (m_frameCount + m_frameStep < m_firstFrame) { m_imSequence->setImageNumber(m_frameCount); } } @@ -270,19 +273,23 @@ void vpVideoReader::acquire(vpImage &I) m_capture >> m_frame; if (m_frameStep == 1) { m_frameCount++; - } else { + } + else { #if VISP_HAVE_OPENCV_VERSION >= 0x030000 m_frameCount = (long)m_capture.get(cv::CAP_PROP_POS_FRAMES); if (m_frameStep > 0) { if (m_frameCount + m_frameStep <= m_lastFrame) { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount - 1); } - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep >= m_firstFrame) { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_firstFrame - 1); } } @@ -291,13 +298,16 @@ void vpVideoReader::acquire(vpImage &I) if (m_frameStep > 0) { if (m_frameCount + m_frameStep <= m_lastFrame) { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount - 1); } - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep >= m_firstFrame) { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_firstFrame - 1); } } @@ -310,7 +320,8 @@ void vpVideoReader::acquire(vpImage &I) // Set last frame to this image index setLastFrameIndex(m_frameCount - m_frameStep); } - } else { + } + else { vpImageConvert::convert(m_frame, I); } } @@ -338,7 +349,8 @@ 
void vpVideoReader::acquire(vpImage &I) try { m_imSequence->acquire(I); skip_frame = false; - } catch (...) { + } + catch (...) { skip_frame = true; } } while (skip_frame && m_imSequence->getImageNumber() < m_lastFrame); @@ -346,7 +358,8 @@ void vpVideoReader::acquire(vpImage &I) m_frameName = m_imSequence->getImageName(); if (m_frameCount + m_frameStep > m_lastFrame) { m_imSequence->setImageNumber(m_frameCount); - } else if (m_frameCount + m_frameStep < m_firstFrame) { + } + else if (m_frameCount + m_frameStep < m_firstFrame) { m_imSequence->setImageNumber(m_frameCount); } } @@ -355,19 +368,23 @@ void vpVideoReader::acquire(vpImage &I) m_capture >> m_frame; if (m_frameStep == 1) { m_frameCount++; - } else { + } + else { #if VISP_HAVE_OPENCV_VERSION >= 0x030000 m_frameCount = (long)m_capture.get(cv::CAP_PROP_POS_FRAMES); if (m_frameStep > 0) { if (m_frameCount + m_frameStep <= m_lastFrame) { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount - 1); } - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep >= m_firstFrame) { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(cv::CAP_PROP_POS_FRAMES, m_firstFrame - 1); } } @@ -376,13 +393,16 @@ void vpVideoReader::acquire(vpImage &I) if (m_frameStep > 0) { if (m_frameCount + m_frameStep <= m_lastFrame) { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount - 1); } - } else if (m_frameStep < 0) { + } + else if (m_frameStep < 0) { if (m_frameCount + m_frameStep >= m_firstFrame) { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount + m_frameStep - 1); - } else { + } + else { m_capture.set(CV_CAP_PROP_POS_FRAMES, m_firstFrame - 1); } } @@ -391,7 +411,8 @@ void vpVideoReader::acquire(vpImage &I) if (m_frame.empty()) { std::cout << 
"Warning: Unable to decode image " << m_frameCount - m_frameStep << std::endl; - } else { + } + else { vpImageConvert::convert(m_frame, I); } } @@ -422,14 +443,17 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) m_imSequence->setImageNumber(m_frameCount); // to not increment vpDiskGrabber next image if (m_frameCount + m_frameStep > m_lastFrame) { m_imSequence->setImageNumber(m_frameCount); - } else if (m_frameCount + m_frameStep < m_firstFrame) { + } + else if (m_frameCount + m_frameStep < m_firstFrame) { m_imSequence->setImageNumber(m_frameCount); } - } catch (...) { + } + catch (...) { vpERROR_TRACE("Couldn't find the %u th frame", frame_index); return false; } - } else { + } + else { #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) if (!m_capture.set(cv::CAP_PROP_POS_FRAMES, frame_index)) { @@ -446,10 +470,12 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) if (m_frame.empty()) { setLastFrameIndex(m_frameCount - m_frameStep); return false; - } else { + } + else { vpImageConvert::convert(m_frame, I); } - } else + } + else vpImageConvert::convert(m_frame, I); #else if (!m_capture.set(CV_CAP_PROP_POS_FRAMES, frame_index)) { @@ -494,14 +520,17 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) m_imSequence->setImageNumber(m_frameCount); // to not increment vpDiskGrabber next image if (m_frameCount + m_frameStep > m_lastFrame) { m_imSequence->setImageNumber(m_frameCount); - } else if (m_frameCount + m_frameStep < m_firstFrame) { + } + else if (m_frameCount + m_frameStep < m_firstFrame) { m_imSequence->setImageNumber(m_frameCount); } - } catch (...) { + } + catch (...) 
{ vpERROR_TRACE("Couldn't find the %u th frame", frame_index); return false; } - } else { + } + else { #if defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) #if VISP_HAVE_OPENCV_VERSION >= 0x030000 if (!m_capture.set(cv::CAP_PROP_POS_FRAMES, frame_index)) { @@ -515,10 +544,12 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) if (m_frame.empty()) { setLastFrameIndex(m_frameCount - m_frameStep); return false; - } else { + } + else { vpImageConvert::convert(m_frame, I); } - } else { + } + else { vpImageConvert::convert(m_frame, I); } #else @@ -532,7 +563,8 @@ bool vpVideoReader::getFrame(vpImage &I, long frame_index) if (m_frameStep > 1) { m_frameCount += m_frameStep - 1; // next index m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount); - } else if (m_frameStep < -1) { + } + else if (m_frameStep < -1) { m_frameCount += m_frameStep - 1; // next index m_capture.set(CV_CAP_PROP_POS_FRAMES, m_frameCount); } diff --git a/modules/io/src/video/vpVideoWriter.cpp b/modules/io/src/video/vpVideoWriter.cpp index 189c9f356f..d21829e200 100644 --- a/modules/io/src/video/vpVideoWriter.cpp +++ b/modules/io/src/video/vpVideoWriter.cpp @@ -74,7 +74,7 @@ vpVideoWriter::~vpVideoWriter() {} which will be saved. If you want to write a sequence of images, `filename` corresponds to - the path followed by the image name template. For exemple, if you want to + the path followed by the image name template. For example, if you want to write different images named `image0001.jpeg`, `image0002.jpg`, ... and located in the folder `/local/image`, `filename` will be `/local/image/image%04d.jpg`. diff --git a/modules/robot/include/visp3/robot/vpAfma4.h b/modules/robot/include/visp3/robot/vpAfma4.h index b857c146b7..f203268fe7 100644 --- a/modules/robot/include/visp3/robot/vpAfma4.h +++ b/modules/robot/include/visp3/robot/vpAfma4.h @@ -40,7 +40,7 @@ \file vpAfma4.h - Modelisation of Irisa's cylindrical robot named Afma4. + Modelization of Irisa's cylindrical robot named Afma4. 
*/ @@ -50,7 +50,7 @@ \ingroup group_robot_real_cylindrical - \brief Modelisation of Irisa's cylindrical robot named Afma4. + \brief Modelization of Irisa's cylindrical robot named Afma4. This robot has five degrees of freedom, but only four motorized joints (joint 3 is not motorized). Joint 2 and 3 are prismatic. The diff --git a/modules/robot/include/visp3/robot/vpAfma6.h b/modules/robot/include/visp3/robot/vpAfma6.h index d1e698cb98..f916f45d46 100644 --- a/modules/robot/include/visp3/robot/vpAfma6.h +++ b/modules/robot/include/visp3/robot/vpAfma6.h @@ -40,7 +40,7 @@ \file vpAfma6.h - Modelisation of Irisa's gantry robot named Afma6. + Modelization of Irisa's gantry robot named Afma6. */ @@ -50,7 +50,7 @@ \ingroup group_robot_real_gantry group_robot_simu_gantry - \brief Modelisation of Irisa's gantry robot named Afma6. + \brief Modelization of Irisa's gantry robot named Afma6. In this modelization, different frames have to be considered. diff --git a/modules/robot/include/visp3/robot/vpPioneer.h b/modules/robot/include/visp3/robot/vpPioneer.h index ee58018703..6c953d1581 100644 --- a/modules/robot/include/visp3/robot/vpPioneer.h +++ b/modules/robot/include/visp3/robot/vpPioneer.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Common features for Pioneer unicycle mobile robots. - * -*****************************************************************************/ + */ #ifndef VPPIONEER_H #define VPPIONEER_H @@ -40,73 +38,66 @@ #include /*! - - \class vpPioneer - - \ingroup group_robot_real_unicycle group_robot_simu_unicycle - - \brief Generic functions for Pioneer mobile robots. - - This class provides common features for Pioneer mobile robots. 
- This robot has two control velocities \f$(v_x, w_z)\f$, the translational - and rotational velocities of the mobile platform respectively. - - The figure below shows the position of the frames that are used to model the - robot. The end effector frame is here located at the middle point between - the two wheels. - - \image html pioneer.png - - The robot jacobian at the end effector frame, the point located at the - middle between the two wheels is given by: - - \f[ - {^e}{\bf J}_e = \left(\begin{array}{cc} - 1 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 1 \\ - \end{array} - \right) - \f] - - Considering \f$(v_x, w_z)\f$, it is possible to compute \f$\bf v\f$ the six - dimention velocity skew expressed at the end effector frame by: - - \f[ - {\bf v} = {^e}{\bf J}_e \; - \left(\begin{array}{c} - v_x \\ - w_z \\ - \end{array} - \right) - \f]. - -*/ + * \class vpPioneer + * + * \ingroup group_robot_real_unicycle group_robot_simu_unicycle + * + * \brief Generic functions for Pioneer mobile robots. + * + * This class provides common features for Pioneer mobile robots. + * This robot has two control velocities \f$(v_x, w_z)\f$, the translational + * and rotational velocities of the mobile platform respectively. + * + * The figure below shows the position of the frames that are used to model the + * robot. The end effector frame is here located at the middle point between + * the two wheels. 
+ * + * \image html pioneer.png + * + * The robot jacobian at the end effector frame, the point located at the + * middle between the two wheels is given by: + * + * \f[ + * {^e}{\bf J}_e = \left(\begin{array}{cc} + * 1 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 1 \\ + * \end{array} + * \right) + * \f] + * + * Considering \f$(v_x, w_z)\f$, it is possible to compute \f$\bf v\f$ the six + * dimension velocity skew expressed at the end effector frame by: + * + * \f[ + * {\bf v} = {^e}{\bf J}_e \; + * \left(\begin{array}{c} + * v_x \\ + * w_z \\ + * \end{array} + * \right) + * \f]. + */ class VISP_EXPORT vpPioneer : public vpUnicycle { public: /*! - Create a default Pioneer robot. - */ + * Create a default Pioneer robot. + */ vpPioneer() : vpUnicycle() { set_cMe(); set_eJe(); } - /*! - Destructor that does nothing. - */ - virtual ~vpPioneer(){}; - private: /*! - Set the transformation between the camera frame and the mobile platform - end effector frame. - */ + * Set the transformation between the camera frame and the mobile platform + * end effector frame. + */ void set_cMe() { // Position of mobile platform end effector frame in the camera frame @@ -119,27 +110,26 @@ class VISP_EXPORT vpPioneer : public vpUnicycle } /*! - Set the robot jacobian at the end effector frame, the point located at the - middle between the two wheels. - - Considering \f${\bf v} = {^e}{\bf J}_e \; [v_x, w_z]\f$ with - \f$(v_x, w_z)\f$ respectively the translational and rotational control - velocities of the mobile robot and \f$\bf v\f$ the six dimention velocity - skew expressed at the end effector frame, the robot jacobian is given by: - - \f[ - {^e}{\bf J}_e = \left(\begin{array}{cc} - 1 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 0 \\ - 0 & 1 \\ - \end{array} - \right) - \f] - - */ + * Set the robot jacobian at the end effector frame, the point located at the + * middle between the two wheels. 
+ * + * Considering \f${\bf v} = {^e}{\bf J}_e \; [v_x, w_z]\f$ with + * \f$(v_x, w_z)\f$ respectively the translational and rotational control + * velocities of the mobile robot and \f$\bf v\f$ the six dimension velocity + * skew expressed at the end effector frame, the robot jacobian is given by: + * + * \f[ + * {^e}{\bf J}_e = \left(\begin{array}{cc} + * 1 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 0 \\ + * 0 & 1 \\ + * \end{array} + * \right) + * \f] + */ void set_eJe() { eJe_.resize(6, 2); // pioneer jacobian expressed at point M diff --git a/modules/robot/include/visp3/robot/vpPioneerPan.h b/modules/robot/include/visp3/robot/vpPioneerPan.h index 6377766b06..1fa48f110a 100644 --- a/modules/robot/include/visp3/robot/vpPioneerPan.h +++ b/modules/robot/include/visp3/robot/vpPioneerPan.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Common features for Pioneer unicycle mobile robots. - * -*****************************************************************************/ + */ #ifndef VPPIONEERPAN_H #define VPPIONEERPAN_H @@ -41,62 +39,60 @@ #include /*! - - \class vpPioneerPan - - \ingroup group_robot_real_unicycle group_robot_simu_unicycle - - \brief Generic functions for Pioneer mobile robots equiped with a pan head. - - This class provides common features for Pioneer mobile robots equiped with a - pan head. - - This robot has three control velocities \f$(v_x, w_z, \dot{q_1})\f$, the - translational and rotational velocities of the mobile platform, the pan head - velocity respectively. - - The figure below shows the position of the frames that are used to model the - robot. The end effector frame is here located at the pan axis. 
- - \image html pioneer-pan.png - - Considering - \f[{\bf v} = {^e}{\bf J}_e \; - \left(\begin{array}{c} - v_x \\ - w_z \\ - \dot{q_1} \\ - \end{array} - \right) - \f] - with - \f$(v_x, w_z)\f$ respectively the translational and rotational control - velocities of the mobile platform, \f$\dot{q_1}\f$ the joint velocity of the - pan head and \f$\bf v\f$ the six dimention velocity skew expressed at point - E in frame E, the robot jacobian is given by: - - \f[ - {^e}{\bf J}_e = \left(\begin{array}{ccc} - c_1 & -c_1*p_y - s_1*p_x & 0 \\ - 0 & 0 & 0 \\ - s_1 & -s_1*p_y + c_1*p_x & 0 \\ - 0 & 0 & 0 \\ - 0 & -1 & 1 \\ - 0 & 0 & 0 \\ - \end{array} - \right) - \f] - - with \f$p_x, p_y\f$ the position of the head base frame in the mobile - platform frame located at the middle point between the two weels. - -*/ + * \class vpPioneerPan + * + * \ingroup group_robot_real_unicycle group_robot_simu_unicycle + * + * \brief Generic functions for Pioneer mobile robots equipped with a pan head. + * + * This class provides common features for Pioneer mobile robots equipped with a + * pan head. + * + * This robot has three control velocities \f$(v_x, w_z, \dot{q_1})\f$, the + * translational and rotational velocities of the mobile platform, the pan head + * velocity respectively. + * + * The figure below shows the position of the frames that are used to model the + * robot. The end effector frame is here located at the pan axis. 
+ * + * \image html pioneer-pan.png + * + * Considering + * \f[{\bf v} = {^e}{\bf J}_e \; + * \left(\begin{array}{c} + * v_x \\ + * w_z \\ + * \dot{q_1} \\ + * \end{array} + * \right) + * \f] + * with + * \f$(v_x, w_z)\f$ respectively the translational and rotational control + * velocities of the mobile platform, \f$\dot{q_1}\f$ the joint velocity of the + * pan head and \f$\bf v\f$ the six dimension velocity skew expressed at point + * E in frame E, the robot jacobian is given by: + * + * \f[ + * {^e}{\bf J}_e = \left(\begin{array}{ccc} + * c_1 & -c_1*p_y - s_1*p_x & 0 \\ + * 0 & 0 & 0 \\ + * s_1 & -s_1*p_y + c_1*p_x & 0 \\ + * 0 & 0 & 0 \\ + * 0 & -1 & 1 \\ + * 0 & 0 & 0 \\ + * \end{array} + * \right) + * \f] + * + * with \f$p_x, p_y\f$ the position of the head base frame in the mobile + * platform frame located at the middle point between the two wheels. + */ class VISP_EXPORT vpPioneerPan : public vpUnicycle { public: /*! - Create a pioneer mobile robot equiped with a pan head. - */ + * Create a pioneer mobile robot equipped with a pan head. + */ vpPioneerPan() : mMp_(), pMe_() { double q = 0; // Initial position of the pan axis @@ -106,40 +102,34 @@ class VISP_EXPORT vpPioneerPan : public vpUnicycle set_eJe(q); } - /*! - Destructor that does nothing. - */ - virtual ~vpPioneerPan(){}; - /** @name Inherited functionalities from vpPioneerPan */ //@{ /*! - Set the robot jacobian expressed at point E the end effector frame located - on the pan head. 
- - Considering \f${\bf v} = {^e}{\bf J}_e \; [v_x, w_z, \dot{q_1}]\f$ with - \f$(v_x, w_z)\f$ respectively the translational and rotational control - velocities of the mobile platform, \f$\dot{q_1}\f$ the joint velocity of - the pan head and \f$\bf v\f$ the six dimention velocity skew expressed at - point E in frame E, the robot jacobian is given by: - - \f[ - {^e}{\bf J}_e = \left(\begin{array}{ccc} - c_1 & -c_1*p_y - s_1*p_x & 0 \\ - 0 & 0 & 0 \\ - s_1 & -s_1*p_y + c_1*p_x & 0 \\ - 0 & 0 & 0 \\ - 0 & -1 & 1 \\ - 0 & 0 & 0 \\ - \end{array} - \right) - \f] - - with \f$p_x, p_y\f$ the position of the head base frame in the mobile - platform frame located at the middle point between the two weels. - - */ + * Set the robot jacobian expressed at point E the end effector frame located + * on the pan head. + * + * Considering \f${\bf v} = {^e}{\bf J}_e \; [v_x, w_z, \dot{q_1}]\f$ with + * \f$(v_x, w_z)\f$ respectively the translational and rotational control + * velocities of the mobile platform, \f$\dot{q_1}\f$ the joint velocity of + * the pan head and \f$\bf v\f$ the six dimension velocity skew expressed at + * point E in frame E, the robot jacobian is given by: + * + * \f[ + * {^e}{\bf J}_e = \left(\begin{array}{ccc} + * c_1 & -c_1*p_y - s_1*p_x & 0 \\ + * 0 & 0 & 0 \\ + * s_1 & -s_1*p_y + c_1*p_x & 0 \\ + * 0 & 0 & 0 \\ + * 0 & -1 & 1 \\ + * 0 & 0 & 0 \\ + * \end{array} + * \right) + * \f] + * + * with \f$p_x, p_y\f$ the position of the head base frame in the mobile + * platform frame located at the middle point between the two wheels. + */ void set_eJe(double q_pan) { double px = mMp_[0][3]; @@ -166,9 +156,9 @@ class VISP_EXPORT vpPioneerPan : public vpUnicycle /** @name Protected Member Functions Inherited from vpPioneerPan */ //@{ /*! - Set the transformation between the camera frame and the pan head end - effector frame. - */ + * Set the transformation between the camera frame and the pan head end + * effector frame. 
+ */ void set_cMe() { // Position of pan head end effector frame in the camera frame @@ -189,10 +179,10 @@ class VISP_EXPORT vpPioneerPan : public vpUnicycle } /*! - Set the transformation between the mobile platform frame - located at the middle point between the two weels and the base frame of - the pan head. - */ + * Set the transformation between the mobile platform frame + * located at the middle point between the two wheels and the base frame of + * the pan head. + */ void set_mMp() { // Position of the pan head in the mobile platform frame @@ -210,12 +200,11 @@ class VISP_EXPORT vpPioneerPan : public vpUnicycle } /*! - Set the transformation between the pan head reference frame and the - end-effector frame. - - \param q : Position in rad of the pan axis. - - */ + * Set the transformation between the pan head reference frame and the + * end-effector frame. + * + * \param q : Position in rad of the pan axis. + */ void set_pMe(const double q) { vpRotationMatrix pRe; diff --git a/modules/robot/include/visp3/robot/vpQbSoftHand.h b/modules/robot/include/visp3/robot/vpQbSoftHand.h index 002a20d6c4..d4dbd9995a 100644 --- a/modules/robot/include/visp3/robot/vpQbSoftHand.h +++ b/modules/robot/include/visp3/robot/vpQbSoftHand.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Interface for the qb robotics qbSoftHand device. - * -*****************************************************************************/ + */ #ifndef _vpQbSoftHand_h_ #define _vpQbSoftHand_h_ @@ -43,56 +41,53 @@ #include /*! - - \class vpQbSoftHand - - \ingroup group_robot_haptic - - Interface for qbSoftHand [device](https://qbrobotics.com/products/qb-softhand/). - - See https://qbrobotics.com/ for more details. - - \note Before using this class under Linux (Ubuntu, Debian, Fedora...) 
it is mandatory to add - user to the dialout group. To do so, you must execute: - \code - $ sudo adduser user_name dialout - \endcode - otherwise you will get an error: - \code - vpQbDevice fails while opening [/dev/ttyUSB0] and sets errno [Permission denied]. - \endcode - - The following example shows how to close and open the SoftHand with a given speed factor and stiffness used to stop -the command applied to the motors when the measured current is larger than the stiffness multiplied by the maximum -allowed current that can be applied to the motors. - - \code -#include - -int main() -{ - vpQbSoftHand qbsofthand; - - vpColVector q(1); - - double speed_factor = 0.5; // half speed - double stiffness = 0.7; // 70% of the max allowed current supported by the motors - std::cout << "** Close the hand with blocking positioning function" << std::endl; - q[0] = 1; - qbsofthand.setPosition(q, speed_factor, stiffness); - - std::cout << "** Open the hand with blocking positioning function" << std::endl; - q[0] = 0; - qbsofthand.setPosition(q, speed_factor, stiffness); -} - \endcode - + * \class vpQbSoftHand + * + * \ingroup group_robot_haptic + * + * Interface for qbSoftHand [device](https://qbrobotics.com/products/qb-softhand/). + * + * See https://qbrobotics.com/ for more details. + * + * \note Before using this class under Linux (Ubuntu, Debian, Fedora...) it is mandatory to add + * user to the dialout group. To do so, you must execute: + * \code + * $ sudo adduser user_name dialout + * \endcode + * otherwise you will get an error: + * \code + * vpQbDevice fails while opening [/dev/ttyUSB0] and sets errno [Permission denied]. + * \endcode + * + * The following example shows how to close and open the SoftHand with a given speed factor and stiffness used to stop + * the command applied to the motors when the measured current is larger than the stiffness multiplied by the maximum + * allowed current that can be applied to the motors. 
+ * + * \code + * #include + * + * int main() + * { + * vpQbSoftHand qbsofthand; + * + * vpColVector q(1); + * + * double speed_factor = 0.5; // half speed + * double stiffness = 0.7; // 70% of the max allowed current supported by the motors + * std::cout << "** Close the hand with blocking positioning function" << std::endl; + * q[0] = 1; + * qbsofthand.setPosition(q, speed_factor, stiffness); + * + * std::cout << "** Open the hand with blocking positioning function" << std::endl; + * q[0] = 0; + * qbsofthand.setPosition(q, speed_factor, stiffness); + * } + * \endcode */ class VISP_EXPORT vpQbSoftHand : public vpQbDevice { public: vpQbSoftHand(); - virtual ~vpQbSoftHand(); void getCurrent(vpColVector ¤t, const int &id = 1); void getPosition(vpColVector &position, const int &id = 1); diff --git a/modules/robot/include/visp3/robot/vpRobotAfma4.h b/modules/robot/include/visp3/robot/vpRobotAfma4.h index 32c33c8044..eade24c243 100644 --- a/modules/robot/include/visp3/robot/vpRobotAfma4.h +++ b/modules/robot/include/visp3/robot/vpRobotAfma4.h @@ -219,7 +219,7 @@ class VISP_EXPORT vpRobotAfma4 : public vpAfma4, public vpRobot virtual ~vpRobotAfma4(void); void getDisplacement(vpRobot::vpControlFrameType frame, vpColVector &displacement); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position, double ×tamp); double getPositioningVelocity(void); @@ -236,8 +236,8 @@ class VISP_EXPORT vpRobotAfma4 : public vpAfma4, public vpRobot void get_cMe(vpHomogeneousMatrix &cMe) const; void get_cVe(vpVelocityTwistMatrix &cVe) const; void get_cVf(vpVelocityTwistMatrix &cVf) const; - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void init(void); @@ -250,7 +250,7 @@ class VISP_EXPORT vpRobotAfma4 : 
public vpAfma4, public vpRobot static bool savePosFile(const std::string &filename, const vpColVector &q); /* --- POSITIONNEMENT --------------------------------------------------- */ - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, const double q1, const double q2, const double q4, const double q5); void setPosition(const char *filename); @@ -262,7 +262,7 @@ class VISP_EXPORT vpRobotAfma4 : public vpAfma4, public vpRobot /* --- VITESSE ---------------------------------------------------------- */ - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); }; diff --git a/modules/robot/include/visp3/robot/vpRobotAfma6.h b/modules/robot/include/visp3/robot/vpRobotAfma6.h index 8918fb21ca..41f821b1c8 100644 --- a/modules/robot/include/visp3/robot/vpRobotAfma6.h +++ b/modules/robot/include/visp3/robot/vpRobotAfma6.h @@ -258,7 +258,7 @@ class VISP_EXPORT vpRobotAfma6 : public vpAfma6, public vpRobot void getDisplacement(vpRobot::vpControlFrameType frame, vpColVector &displacement); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position, double ×tamp); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position, double ×tamp); @@ -275,15 +275,15 @@ class VISP_EXPORT vpRobotAfma6 : public vpAfma6, public vpRobot void get_cMe(vpHomogeneousMatrix &_cMe) const; void get_cVe(vpVelocityTwistMatrix &_cVe) const; - void get_eJe(vpMatrix 
&_eJe); - void get_fJe(vpMatrix &_fJe); + void get_eJe(vpMatrix &_eJe) override; + void get_fJe(vpMatrix &_fJe) override; void init(void); void init(vpAfma6::vpAfma6ToolType tool, const vpHomogeneousMatrix &eMc); void init(vpAfma6::vpAfma6ToolType tool, const std::string &filename); void - init(vpAfma6::vpAfma6ToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpAfma6::vpAfma6ToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void move(const std::string &filename); void move(const std::string &filename, double velocity); @@ -298,7 +298,7 @@ class VISP_EXPORT vpRobotAfma6 : public vpAfma6, public vpRobot /* --- POSITIONNEMENT --------------------------------------------------- */ void setPosition(const vpRobot::vpControlFrameType frame, const vpPoseVector &pose); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const std::string &filename); @@ -311,7 +311,7 @@ class VISP_EXPORT vpRobotAfma6 : public vpAfma6, public vpRobot /* --- VITESSE ---------------------------------------------------------- */ - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); }; diff --git a/modules/robot/include/visp3/robot/vpRobotBiclops.h b/modules/robot/include/visp3/robot/vpRobotBiclops.h index 356e3c4c94..e082116672 100644 --- a/modules/robot/include/visp3/robot/vpRobotBiclops.h +++ b/modules/robot/include/visp3/robot/vpRobotBiclops.h @@ -168,7 +168,7 @@ class VISP_EXPORT 
vpRobotBiclops : public vpBiclops, public vpRobot * \exception vpRobotException::constructionError If the config file cannot be * opened. */ - void init(); + void init() override; /*! * Get the homogeneous matrix corresponding to the transformation between the @@ -198,7 +198,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \param eJe : Jacobian between end effector frame and end effector frame (on * tilt axis). */ - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; /*! * Get the robot jacobian expressed in the robot reference frame @@ -206,7 +206,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \param fJe : Jacobian between reference frame (or fix frame) and end * effector frame (on tilt axis). */ - void get_fJe(vpMatrix &fJe); + void get_fJe(vpMatrix &fJe) override; /*! * Get the robot displacement since the last call of this method. @@ -228,7 +228,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \exception vpRobotException::wrongStateError If a not supported frame type * is given. */ - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &d); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &d) override; /*! * Return the position of each axis. @@ -243,7 +243,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \exception vpRobotException::wrongStateError : If a not supported frame type * is given. */ - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; /*! * Get the velocity in % used for a position control. @@ -315,7 +315,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \exception vpRobotException::wrongStateError : If a not supported frame * type is given. 
*/ - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; /*! * Move the robot in position control. @@ -359,7 +359,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * Change the state of the robot either to stop them, or to set position or * speed control. */ - vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState); + vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState) override; /*! * Send a velocity on each axis. @@ -385,7 +385,7 @@ class VISP_EXPORT vpRobotBiclops : public vpBiclops, public vpRobot * \warning Velocities could be saturated if one of them exceed the maximal * authorized speed (see vpRobot::maxRotationVelocity). */ - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &q_dot); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &q_dot) override; /*! * Halt all the axis. 
diff --git a/modules/robot/include/visp3/robot/vpRobotCamera.h b/modules/robot/include/visp3/robot/vpRobotCamera.h index 06615ffcbe..84be9b7b93 100644 --- a/modules/robot/include/visp3/robot/vpRobotCamera.h +++ b/modules/robot/include/visp3/robot/vpRobotCamera.h @@ -111,27 +111,26 @@ class VISP_EXPORT vpRobotCamera : public vpRobotSimulator public: vpRobotCamera(); - virtual ~vpRobotCamera(); /** @name Inherited functionalities from vpRobotCamera */ //@{ void get_cVe(vpVelocityTwistMatrix &cVe) const; - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; void getPosition(vpHomogeneousMatrix &cMw) const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; void setPosition(const vpHomogeneousMatrix &cMw); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &v); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &v) override; //@} private: - void init(); + void init() override; // Non implemented virtual pure functions - void get_fJe(vpMatrix & /*_fJe */){}; - void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */){}; - void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */){}; + void get_fJe(vpMatrix & /*_fJe */) override { }; + void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */) override { }; + void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */) override { }; }; #endif diff --git a/modules/robot/include/visp3/robot/vpRobotFlirPtu.h b/modules/robot/include/visp3/robot/vpRobotFlirPtu.h index bb9aad8d38..10a14bd979 100644 --- a/modules/robot/include/visp3/robot/vpRobotFlirPtu.h +++ b/modules/robot/include/visp3/robot/vpRobotFlirPtu.h @@ -99,9 +99,9 @@ class VISP_EXPORT vpRobotFlirPtu : public vpRobot void connect(const std::string &portname, 
int baudrate = 9600); void disconnect(); - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; vpMatrix get_eJe(); - void get_fJe(vpMatrix &fJe); + void get_fJe(vpMatrix &fJe) override; vpMatrix get_fJe(); vpMatrix get_fMe(); @@ -112,13 +112,13 @@ class VISP_EXPORT vpRobotFlirPtu : public vpRobot vpHomogeneousMatrix get_eMc() const { return m_eMc; } vpVelocityTwistMatrix get_cVe() const; - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q) override; std::string getNetworkIP(); std::string getNetworkGateway(); std::string getNetworkHostName(); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; vpColVector getPanPosLimits(); vpColVector getTiltPosLimits(); vpColVector getPanTiltVelMax(); @@ -130,13 +130,13 @@ class VISP_EXPORT vpRobotFlirPtu : public vpRobot If your tool is a camera, this transformation is obtained by hand-eye calibration. 
*/ void set_eMc(vpHomogeneousMatrix &eMc) { m_eMc = eMc; } - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; void setPanPosLimits(const vpColVector &pan_limits); void setTiltPosLimits(const vpColVector &tilt_limits); void setPositioningVelocity(double velocity); vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; void stopMotion(); static void emergencyStop(int signo); diff --git a/modules/robot/include/visp3/robot/vpRobotFranka.h b/modules/robot/include/visp3/robot/vpRobotFranka.h index e38b7ef5a8..8f1d1fd765 100644 --- a/modules/robot/include/visp3/robot/vpRobotFranka.h +++ b/modules/robot/include/visp3/robot/vpRobotFranka.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Interface for the Franka robot. - * -*****************************************************************************/ + */ #ifndef _vpRobotFranka_h_ #define _vpRobotFranka_h_ @@ -57,179 +55,180 @@ #include /*! - \class vpRobotFranka - - \ingroup group_robot_real_arm - - This class is a wrapper over the [libfranka](https://github.com/frankaemika/libfranka) - component part of the [Franka Control Interface](https://frankaemika.github.io/docs/) (FCI). - - Before using vpRobotFranka follow the - [installation instructions](https://frankaemika.github.io/docs/installation.html#) to install - libfranka. We suggest to - [build libfranka from source](https://frankaemika.github.io/docs/installation.html#building-libfranka) - if you are not using ROS. 
- - Moreover, you need also to setup a real-time kernel following these - [instructions](https://frankaemika.github.io/docs/installation.html#setting-up-the-real-time-kernel). - - Up to now, this class provides the following capabilities to: - - move to a given joint position using setPosition() that is blocking and that returns only when the robot - has reached the desired position. - \code - vpRobotFranka robot("192.168.1.1"); - - vpColVector q_d(7); - q_d[3] = -M_PI_2; - q_d[5] = M_PI_2; - q_d[6] = M_PI_4; - std::cout << "Move to joint position: " << q_d.t() << std::endl; - robot.setPosition(vpRobot::JOINT_STATE, q_d); - \endcode - - move applying a joint velocity using setVelocity(). This function is non-blocking. - \code - vpRobotFranka robot("192.168.1.1"); - - robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - - vpColVector dq_d(7, 0); - dq_d[4] = vpMath::rad(-20.); - dq_d[6] = vpMath::rad(20.); - while(1) { - robot.setVelocity(vpRobot::JOINT_STATE, dq_d); - ... - } - \endcode - - move applying a cartesian velocity to the end-effector using setVelocity(). This function is non-blocking. - \code - vpRobotFranka robot("192.168.1.1"); - - vpColVector ve_d(6); - ve_d[2] = 0.02; // vz = 2 cm/s goes down - - while(1) { - robot.setVelocity(vpRobot::END_EFFECTOR_FRAME, ve_d); - ... - } - \endcode - - move applying a cartesian velocity to the camera frame (or a given tool frame) using setVelocity(). - The camera frame (or a tool frame) location wrt the end-effector is set using set_eMc(). This function is - non-blocking. \code vpRobotFranka robot("192.168.1.1"); vpHomogeneousMatrix eMc; // Position of the camera wrt the - end-effector - // update eMc - robot.set_eMc(eMc); - - vpColVector vc_d(6); - vc_d[2] = 0.02; // vz = 2 cm/s is along the camera optical axis - - while(1) { - robot.setVelocity(vpRobot::CAMERA_FRAME, vc_d); - ... 
- } - \endcode - If the tool attached to the end-effector is not a camera, you can do exactly the same using: - \code - vpRobotFranka robot("192.168.1.1"); - vpHomogeneousMatrix eMt; - // update eMt, the position of the tool wrt the end-effector frame - robot.set_eMc(eMt); - - vpColVector vt_d(6); - vt_d[2] = 0.02; // vt = 2 cm/s is along tool z axis - - while(1) { - robot.setVelocity(vpRobot::TOOL_FRAME, vt_d); - ... - } - \endcode - - - get the joint position using getPosition() - \code - vpRobotFranka robot("192.168.1.1"); - - vpColVector q; - while(1) { - robot.getPosition(vpRobot::JOINT_STATE, q); - ... - } - \endcode - - get the cartesian end-effector position using getPosition(). This function is non-blocking. - \code - vpRobotFranka robot("192.168.1.1"); - - vpPoseVector wPe; - vpHomogeneousMatrix wMe; - while(1) { - robot.getPosition(vpRobot::END_EFFECTOR_FRAME, wPe); - wMe.buildFrom(wPe); - ... - } - \endcode - - get the cartesian camera (or tool) frame position using getPosition(). This function is non-blocking. - \code - vpRobotFranka robot("192.168.1.1"); - vpHomogeneousMatrix eMc; - // update eMc, the position of the camera wrt the end-effector frame - robot.set_eMc(eMc); - - vpPoseVector wPc; - vpHomogeneousMatrix wMc; - while(1) { - robot.getPosition(vpRobot::CAMERA_FRAME, wPc); - wMc.buildFrom(wPc); - ... - } - \endcode - If the tool attached to the end-effector is not a camera, you can do exactly the same using: - \code - vpRobotFranka robot("192.168.1.1"); - vpHomogeneousMatrix eMt; - // update eMt, the position of the tool wrt the end-effector frame - robot.set_eMc(eMt); - - vpPoseVector wPt; - vpHomogeneousMatrix wMt; - while(1) { - robot.getPosition(vpRobot::TOOL_FRAME, wPt); - wMt.buildFrom(wPt); - ... 
- } - \endcode - - What is not implemented is: - - move to a given cartesian end-effector position - - gripper controller - - force/torque feedback and control - - Known issues: - - sometimes the joint to joint trajectory generator provided by Franka complains about discontinuities. - - We provide also the getHandler() function that allows to access to the robot handler and call the native - [libfranka API](https://frankaemika.github.io/libfranka/index.html) functionalities: - \code - vpRobotFranka robot("192.168.1.1"); - - franka::Robot *handler = robot.getHandler(); - - // Get end-effector cartesian position - std::array pose = handler->readOnce().O_T_EE; - \endcode - - \sa \ref tutorial-franka-pbvs - \sa \ref tutorial-franka-ibvs - -*/ + * \class vpRobotFranka + * + * \ingroup group_robot_real_arm + * + * This class is a wrapper over the [libfranka](https://github.com/frankaemika/libfranka) + * component part of the [Franka Control Interface](https://frankaemika.github.io/docs/) (FCI). + * + * Before using vpRobotFranka follow the + * [installation instructions](https://frankaemika.github.io/docs/installation.html#) to install + * libfranka. We suggest to + * [build libfranka from source](https://frankaemika.github.io/docs/installation.html#building-libfranka) + * if you are not using ROS. + * + * Moreover, you need also to setup a real-time kernel following these + * [instructions](https://frankaemika.github.io/docs/installation.html#setting-up-the-real-time-kernel). + * + * Up to now, this class provides the following capabilities to: + * - move to a given joint position using setPosition() that is blocking and that returns only when the robot + * has reached the desired position. 
+ * \code + * vpRobotFranka robot("192.168.1.1"); + * + * vpColVector q_d(7); + * q_d[3] = -M_PI_2; + * q_d[5] = M_PI_2; + * q_d[6] = M_PI_4; + * std::cout << "Move to joint position: " << q_d.t() << std::endl; + * robot.setPosition(vpRobot::JOINT_STATE, q_d); + * \endcode + * - move applying a joint velocity using setVelocity(). This function is non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); + * + * vpColVector dq_d(7, 0); + * dq_d[4] = vpMath::rad(-20.); + * dq_d[6] = vpMath::rad(20.); + * while(1) { + * robot.setVelocity(vpRobot::JOINT_STATE, dq_d); + * ... + * } + * \endcode + * - move applying a cartesian velocity to the end-effector using setVelocity(). This function is non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * vpColVector ve_d(6); + * ve_d[2] = 0.02; // vz = 2 cm/s goes down + * + * while(1) { + * robot.setVelocity(vpRobot::END_EFFECTOR_FRAME, ve_d); + * ... + * } + * \endcode + * - move applying a cartesian velocity to the camera frame (or a given tool frame) using setVelocity(). + * The camera frame (or a tool frame) location wrt the end-effector is set using set_eMc(). This function is + * non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * vpHomogeneousMatrix eMc; // Position of the camera wrt the end-effector + * // update eMc + * robot.set_eMc(eMc); + * + * vpColVector vc_d(6); + * vc_d[2] = 0.02; // vz = 2 cm/s is along the camera optical axis + * + * while(1) { + * robot.setVelocity(vpRobot::CAMERA_FRAME, vc_d); + * ... 
+ * } + * \endcode + * If the tool attached to the end-effector is not a camera, you can do exactly the same using: + * \code + * vpRobotFranka robot("192.168.1.1"); + * vpHomogeneousMatrix eMt; + * // update eMt, the position of the tool wrt the end-effector frame + * robot.set_eMc(eMt); + * + * vpColVector vt_d(6); + * vt_d[2] = 0.02; // vt = 2 cm/s is along tool z axis + * + * while(1) { + * robot.setVelocity(vpRobot::TOOL_FRAME, vt_d); + * ... + * } + * \endcode + * + * - get the joint position using getPosition() + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * vpColVector q; + * while(1) { + * robot.getPosition(vpRobot::JOINT_STATE, q); + * ... + * } + * \endcode + * - get the cartesian end-effector position using getPosition(). This function is non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * vpPoseVector wPe; + * vpHomogeneousMatrix wMe; + * while(1) { + * robot.getPosition(vpRobot::END_EFFECTOR_FRAME, wPe); + * wMe.buildFrom(wPe); + * ... + * } + * \endcode + * - get the cartesian camera (or tool) frame position using getPosition(). This function is non-blocking. + * \code + * vpRobotFranka robot("192.168.1.1"); + * vpHomogeneousMatrix eMc; + * // update eMc, the position of the camera wrt the end-effector frame + * robot.set_eMc(eMc); + * + * vpPoseVector wPc; + * vpHomogeneousMatrix wMc; + * while(1) { + * robot.getPosition(vpRobot::CAMERA_FRAME, wPc); + * wMc.buildFrom(wPc); + * ... + * } + * \endcode + * If the tool attached to the end-effector is not a camera, you can do exactly the same using: + * \code + * vpRobotFranka robot("192.168.1.1"); + * vpHomogeneousMatrix eMt; + * // update eMt, the position of the tool wrt the end-effector frame + * robot.set_eMc(eMt); + * + * vpPoseVector wPt; + * vpHomogeneousMatrix wMt; + * while(1) { + * robot.getPosition(vpRobot::TOOL_FRAME, wPt); + * wMt.buildFrom(wPt); + * ... 
+ * } + * \endcode + * + * What is not implemented is: + * - move to a given cartesian end-effector position + * - gripper controller + * - force/torque feedback and control + * + * Known issues: + * - sometimes the joint to joint trajectory generator provided by Franka complains about discontinuities. + * + * We provide also the getHandler() function that allows to access to the robot handler and call the native + * [libfranka API](https://frankaemika.github.io/libfranka/index.html) functionalities: + * \code + * vpRobotFranka robot("192.168.1.1"); + * + * franka::Robot *handler = robot.getHandler(); + * + * // Get end-effector cartesian position + * std::array pose = handler->readOnce().O_T_EE; + * \endcode + * + * \sa \ref tutorial-franka-pbvs + * \sa \ref tutorial-franka-ibvs + */ class VISP_EXPORT vpRobotFranka : public vpRobot { private: /*! - Copy constructor not allowed. + * Copy constructor not allowed. */ vpRobotFranka(const vpRobotFranka &robot); /*! - This function is not implemented. + * This function is not implemented. 
*/ - void getDisplacement(const vpRobot::vpControlFrameType, vpColVector &){}; + void getDisplacement(const vpRobot::vpControlFrameType, vpColVector &) override { }; void init(); @@ -279,9 +278,9 @@ class VISP_EXPORT vpRobotFranka : public vpRobot vpHomogeneousMatrix get_fMc(const vpColVector &q); vpHomogeneousMatrix get_eMc() const; - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; void get_eJe(const vpColVector &q, vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_fJe(vpMatrix &fJe) override; void get_fJe(const vpColVector &q, vpMatrix &fJe); void getCoriolis(vpColVector &coriolis); @@ -324,7 +323,7 @@ class VISP_EXPORT vpRobotFranka : public vpRobot void getMass(vpMatrix &mass); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &pose); void getVelocity(const vpRobot::vpControlFrameType frame, vpColVector &d_position); @@ -345,11 +344,11 @@ class VISP_EXPORT vpRobotFranka : public vpRobot void setForceTorque(const vpRobot::vpControlFrameType frame, const vpColVector &ft, const double &filter_gain = 0.1, const bool &activate_pi_controller = false); void setLogFolder(const std::string &folder); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPositioningVelocity(double velocity); vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; void stopMotion(); }; diff --git a/modules/robot/include/visp3/robot/vpRobotKinova.h b/modules/robot/include/visp3/robot/vpRobotKinova.h index 803e40c9e1..7d62a0b744 
100644 --- a/modules/robot/include/visp3/robot/vpRobotKinova.h +++ b/modules/robot/include/visp3/robot/vpRobotKinova.h @@ -91,12 +91,12 @@ class VISP_EXPORT vpRobotKinova : public vpRobot typedef enum { CMD_LAYER_USB, CMD_LAYER_ETHERNET, CMD_LAYER_UNSET } CommandLayer; vpRobotKinova(); - virtual ~vpRobotKinova(); + virtual ~vpRobotKinova() override; int connect(); - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; /*! * Return constant transformation between end-effector and tool frame. @@ -106,8 +106,8 @@ class VISP_EXPORT vpRobotKinova : public vpRobot int getActiveDevice() const { return m_active_device; } int getNumDevices() const { return m_devices_count; } - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q) override; + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &pose); void homing(); @@ -124,7 +124,7 @@ class VISP_EXPORT vpRobotKinova : public vpRobot */ void setCommandLayer(CommandLayer command_layer) { m_command_layer = command_layer; } void setDoF(unsigned int dof); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; /*! * \param[in] plugin_location: Path to Jaco SDK plugins (ie. `Kinova.API.USBCommandLayerUbuntu.so` on * unix-like platform or `CommandLayerWindows.dll` on Windows platform). By default this location is empty, @@ -132,7 +132,7 @@ class VISP_EXPORT vpRobotKinova : public vpRobot * them. 
*/ void setPluginLocation(const std::string &plugin_location) { m_plugin_location = plugin_location; } - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; /*! * Enable or disable verbose mode to print to stdout additional information. * \param[in] verbose : true to enable verbose, false to disable. By default verbose diff --git a/modules/robot/include/visp3/robot/vpRobotPioneer.h b/modules/robot/include/visp3/robot/vpRobotPioneer.h index c020288140..74d6272fa7 100644 --- a/modules/robot/include/visp3/robot/vpRobotPioneer.h +++ b/modules/robot/include/visp3/robot/vpRobotPioneer.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Interface for Pioneer mobile robots based on Aria 3rd party library. - * -*****************************************************************************/ + */ #ifndef VPROBOTPIONEER_H #define VPROBOTPIONEER_H @@ -51,56 +49,54 @@ #include /*! - - \class vpRobotPioneer - - \ingroup group_robot_real_unicycle - - \brief Interface for Pioneer mobile robots based on Aria 3rd party library. - - This class provides a position and speed control interface for Pioneer - mobile robots. It inherits from the Aria ArRobot class. For more information - about the model of the robot, see vpPioneer documentation. - -*/ + * \class vpRobotPioneer + * + * \ingroup group_robot_real_unicycle + * + * \brief Interface for Pioneer mobile robots based on Aria 3rd party library. + * + * This class provides a position and speed control interface for Pioneer + * mobile robots. It inherits from the Aria ArRobot class. For more information + * about the model of the robot, see vpPioneer documentation. 
+ */ class VISP_EXPORT vpRobotPioneer : public vpRobot, public vpPioneer, public ArRobot { private: /* Not allowed functions. */ /*! - Copy constructor not allowed. + * Copy constructor not allowed. */ vpRobotPioneer(const vpRobotPioneer &robot); public: vpRobotPioneer(); - virtual ~vpRobotPioneer(); + virtual ~vpRobotPioneer() override; /*! - Get the robot Jacobian expressed at point E, the point located at the - middle between the two wheels. - - \param eJe : Robot jacobian such as \f$(v_x, w_z) = {^e}{\bf J}e \; {\bf - v}\f$ with \f$(v_x, w_z)\f$ respectively the translational and rotational - control velocities of the mobile robot, \f$\bf v\f$ the six dimention - velocity skew, and where - - \sa get_eJe() - - */ - void get_eJe(vpMatrix &eJe) { eJe = vpUnicycle::get_eJe(); } + * Get the robot Jacobian expressed at point E, the point located at the + * middle between the two wheels. + * + * \param eJe : Robot jacobian such as \f$(v_x, w_z) = {^e}{\bf J}e \; {\bf + * v}\f$ with \f$(v_x, w_z)\f$ respectively the translational and rotational + * control velocities of the mobile robot, \f$\bf v\f$ the six dimension + * velocity skew, and where + * + * \sa get_eJe() + */ + void get_eJe(vpMatrix &eJe) override { eJe = vpUnicycle::get_eJe(); } private: // Set as private since not implemented /*! - Get the robot Jacobian expressed in the robot reference (or world) frame. - \warning Not implemented. - */ - void get_fJe(vpMatrix & /*fJe*/){}; + * Get the robot Jacobian expressed in the robot reference (or world) frame. + * \warning Not implemented. + */ + void get_fJe(vpMatrix & /*fJe*/) override { }; /*! - Get a displacement (frame as to ve specified) between two successive - position control. \warning Not implemented. - */ - void getDisplacement(const vpRobot::vpControlFrameType /*frame*/, vpColVector & /*q*/){}; + * Get a displacement (frame has to be specified) between two successive + * position control. + * \warning Not implemented. 
+ */ + void getDisplacement(const vpRobot::vpControlFrameType /*frame*/, vpColVector & /*q*/) { }; public: void getVelocity(const vpRobot::vpControlFrameType frame, vpColVector &velocity); @@ -108,27 +104,27 @@ class VISP_EXPORT vpRobotPioneer : public vpRobot, public vpPioneer, public ArRo private: // Set as private since not implemented /*! - Get the robot position (frame has to be specified). - \warning Not implemented. - */ - void getPosition(const vpRobot::vpControlFrameType /*frame*/, vpColVector & /*q*/){}; + * Get the robot position (frame has to be specified). + * \warning Not implemented. + */ + void getPosition(const vpRobot::vpControlFrameType /*frame*/, vpColVector & /*q*/) { }; public: void init(); private: // Set as private since not implemented /*! - Set a displacement (frame has to be specified) in position control. - \warning Not implemented. - */ - void setPosition(const vpRobot::vpControlFrameType /*frame*/, const vpColVector & /*q*/){}; + * Set a displacement (frame has to be specified) in position control. + * \warning Not implemented. + */ + void setPosition(const vpRobot::vpControlFrameType /*frame*/, const vpColVector & /*q*/) override { }; public: - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; /*! - Enable or disable sonar device usage. - */ + * Enable or disable sonar device usage. 
+ */ void useSonar(bool usage) { this->comInt(ArCommands::SONAR, usage); } protected: diff --git a/modules/robot/include/visp3/robot/vpRobotPtu46.h b/modules/robot/include/visp3/robot/vpRobotPtu46.h index 69bba53cc8..4a88e76575 100644 --- a/modules/robot/include/visp3/robot/vpRobotPtu46.h +++ b/modules/robot/include/visp3/robot/vpRobotPtu46.h @@ -99,11 +99,11 @@ class VISP_EXPORT vpRobotPtu46 : public vpPtu46, public vpRobot void get_cMe(vpHomogeneousMatrix &_cMe) const; void get_cVe(vpVelocityTwistMatrix &_cVe) const; - void get_eJe(vpMatrix &_eJe); - void get_fJe(vpMatrix &_fJe); + void get_eJe(vpMatrix &_eJe) override; + void get_fJe(vpMatrix &_fJe) override; void getDisplacement(vpRobot::vpControlFrameType frame, vpColVector &q); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; double getPositioningVelocity(void); void getVelocity(const vpRobot::vpControlFrameType frame, vpColVector &q_dot); vpColVector getVelocity(const vpRobot::vpControlFrameType frame); @@ -112,13 +112,13 @@ class VISP_EXPORT vpRobotPtu46 : public vpPtu46, public vpRobot bool readPositionFile(const std::string &filename, vpColVector &q); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; void setPosition(const vpRobot::vpControlFrameType frame, const double &q1, const double &q2); void setPosition(const char *filename); void setPositioningVelocity(double velocity); vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &q_dot); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &q_dot) override; void stopMotion(); }; diff --git a/modules/robot/include/visp3/robot/vpRobotSimulator.h 
b/modules/robot/include/visp3/robot/vpRobotSimulator.h index 4e79f6a928..b6d87cbe0d 100644 --- a/modules/robot/include/visp3/robot/vpRobotSimulator.h +++ b/modules/robot/include/visp3/robot/vpRobotSimulator.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Basic class used to make robot simulators. - * -*****************************************************************************/ + */ #ifndef vpRobotSimulator_HH #define vpRobotSimulator_HH /*! - \file vpRobotSimulator.h - \brief Basic class used to make robot simulators. -*/ + * \file vpRobotSimulator.h + * \brief Basic class used to make robot simulators. + */ #include #include @@ -47,15 +45,14 @@ #include /*! - \class vpRobotSimulator - - \ingroup group_robot_simu_gantry group_robot_simu_arm - group_robot_simu_unicycle \ingroup group_robot_simu_camera - - \brief This class aims to be a basis used to create all the - robot simulators. - -*/ + * \class vpRobotSimulator + * + * \ingroup group_robot_simu_gantry group_robot_simu_arm + * group_robot_simu_unicycle \ingroup group_robot_simu_camera + * + * \brief This class aims to be a basis used to create all the + * robot simulators. + */ class VISP_EXPORT vpRobotSimulator : public vpRobot { protected: @@ -63,28 +60,23 @@ class VISP_EXPORT vpRobotSimulator : public vpRobot public: vpRobotSimulator(); - /*! - Basic destructor - */ - virtual ~vpRobotSimulator(){}; /** @name Inherited functionalities from vpRobotSimulator */ //@{ /*! - Return the sampling time. - - \return Sampling time in second used to compute the robot displacement - from the velocity applied to the robot during this time. - */ + * Return the sampling time. + * + * \return Sampling time in second used to compute the robot displacement + * from the velocity applied to the robot during this time. 
+ */ inline double getSamplingTime() const { return (this->delta_t_); } /*! - Set the sampling time. - - \param delta_t : Sampling time in second used to compute the robot - displacement from the velocity applied to the robot during this time. - - */ + * Set the sampling time. + * + * \param delta_t : Sampling time in second used to compute the robot + * displacement from the velocity applied to the robot during this time. + */ virtual inline void setSamplingTime(const double &delta_t) { this->delta_t_ = delta_t; } //@} }; diff --git a/modules/robot/include/visp3/robot/vpRobotTemplate.h b/modules/robot/include/visp3/robot/vpRobotTemplate.h index 777998f4fb..c158ad26f8 100644 --- a/modules/robot/include/visp3/robot/vpRobotTemplate.h +++ b/modules/robot/include/visp3/robot/vpRobotTemplate.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Defines a robot just to show which function you must implement. - * -*****************************************************************************/ + */ #ifndef vpRobotTemplate_h #define vpRobotTemplate_h /*! - \file vpRobotTemplate.h - Defines a robot just to show which function you must implement. -*/ + * \file vpRobotTemplate.h + * Defines a robot just to show which function you must implement. + */ #include @@ -47,40 +45,38 @@ #include /*! - - \class vpRobotTemplate - \ingroup group_robot_real_template - \brief Class that defines a robot just to show which function you must implement. - -*/ + * \class vpRobotTemplate + * \ingroup group_robot_real_template + * \brief Class that defines a robot just to show which function you must implement. 
+ */ class VISP_EXPORT vpRobotTemplate : public vpRobot { public: vpRobotTemplate(); - virtual ~vpRobotTemplate(); + virtual ~vpRobotTemplate() override; - void get_eJe(vpMatrix &eJe_); - void get_fJe(vpMatrix &fJe_); + void get_eJe(vpMatrix &eJe_) override; + void get_fJe(vpMatrix &fJe_) override; /*! - Return constant transformation between end-effector and tool frame. - If your tool is a camera, this transformation is obtained by hand-eye calibration. + * Return constant transformation between end-effector and tool frame. + * If your tool is a camera, this transformation is obtained by hand-eye calibration. */ vpHomogeneousMatrix get_eMc() const { return m_eMc; } - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q) override; + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; /*! - Set constant transformation between end-effector and tool frame. - If your tool is a camera, this transformation is obtained by hand-eye calibration. + * Set constant transformation between end-effector and tool frame. + * If your tool is a camera, this transformation is obtained by hand-eye calibration. 
*/ void set_eMc(vpHomogeneousMatrix &eMc) { m_eMc = eMc; } - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; protected: - void init(); + void init() override; void getJointPosition(vpColVector &q); void setCartVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &v); void setJointVelocity(const vpColVector &qdot); diff --git a/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h b/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h index b1e2eb8716..30651d1c23 100644 --- a/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h +++ b/modules/robot/include/visp3/robot/vpRobotUniversalRobots.h @@ -95,7 +95,7 @@ class VISP_EXPORT vpRobotUniversalRobots : public vpRobot void getForceTorque(const vpRobot::vpControlFrameType frame, vpColVector &force); std::string getPolyScopeVersion(); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &pose); int getRobotMode() const; std::string getRobotModel() const; @@ -105,12 +105,12 @@ class VISP_EXPORT vpRobotUniversalRobots : public vpRobot bool readPosFile(const std::string &filename, vpColVector &q); bool savePosFile(const std::string &filename, const vpColVector &q); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, const vpPoseVector &pose); void setPositioningVelocity(double velocity); 
vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; void set_eMc(const vpHomogeneousMatrix &eMc); @@ -118,9 +118,9 @@ class VISP_EXPORT vpRobotUniversalRobots : public vpRobot private: // Not implemented yet - void get_eJe(vpMatrix &_eJe){}; - void get_fJe(vpMatrix &_fJe){}; - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q){}; + void get_eJe(vpMatrix &_eJe) override { }; + void get_fJe(vpMatrix &_fJe) override { }; + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &q) override { }; protected: void init(); diff --git a/modules/robot/include/visp3/robot/vpRobotViper650.h b/modules/robot/include/visp3/robot/vpRobotViper650.h index 0066eb88a0..cf51cf57bb 100644 --- a/modules/robot/include/visp3/robot/vpRobotViper650.h +++ b/modules/robot/include/visp3/robot/vpRobotViper650.h @@ -336,7 +336,8 @@ class VISP_EXPORT vpRobotViper650 : public vpViper650, public vpRobot public: /* Constantes */ /*! \enum vpControlModeType Control mode. */ - typedef enum { + typedef enum + { AUTO, //!< Automatic control mode (default). MANUAL, //!< Manual control mode activated when the dead man switch is in //!< use. 
@@ -421,13 +422,13 @@ class VISP_EXPORT vpRobotViper650 : public vpViper650, public vpRobot void get_cMe(vpHomogeneousMatrix &cMe) const; void get_cVe(vpVelocityTwistMatrix &cVe) const; - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void init(void); void - init(vpViper650::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper650::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void init(vpViper650::vpToolType tool, const std::string &filename); void init(vpViper650::vpToolType tool, const vpHomogeneousMatrix &eMc_); @@ -448,7 +449,7 @@ class VISP_EXPORT vpRobotViper650 : public vpViper650, public vpRobot void setMaxRotationVelocityJoint6(double w6_max); // Position control - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const std::string &filename); @@ -457,7 +458,7 @@ class VISP_EXPORT vpRobotViper650 : public vpViper650, public vpRobot // State vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); // Velocity control - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); diff --git a/modules/robot/include/visp3/robot/vpRobotViper850.h b/modules/robot/include/visp3/robot/vpRobotViper850.h index 267858d7f0..145d633729 100644 --- a/modules/robot/include/visp3/robot/vpRobotViper850.h +++ 
b/modules/robot/include/visp3/robot/vpRobotViper850.h @@ -342,7 +342,8 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot public: /* Constantes */ /*! \enum vpControlModeType Control mode. */ - typedef enum { + typedef enum + { AUTO, //!< Automatic control mode (default). MANUAL, //!< Manual control mode activated when the dead man switch is in //!< use. @@ -414,7 +415,7 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot vpColVector getForceTorque() const; double getMaxRotationVelocityJoint6() const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &position, double ×tamp); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position, double ×tamp); @@ -432,13 +433,13 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot void get_cMe(vpHomogeneousMatrix &cMe) const; void get_cVe(vpVelocityTwistMatrix &cVe) const; - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void init(void); void - init(vpViper850::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper850::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void init(vpViper850::vpToolType tool, const std::string &filename); void init(vpViper850::vpToolType tool, const vpHomogeneousMatrix &eMc_); @@ -459,7 +460,7 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot void setMaxRotationVelocityJoint6(double w6_max); // Position control - void setPosition(const 
vpRobot::vpControlFrameType frame, const vpColVector &position); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &position) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const std::string &filename); @@ -469,7 +470,7 @@ class VISP_EXPORT vpRobotViper850 : public vpViper850, public vpRobot vpRobot::vpRobotStateType setRobotState(vpRobot::vpRobotStateType newState); // Velocity control - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); void unbiasForceTorqueSensor(); diff --git a/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h b/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h index 19127ce6e4..f2fac6d886 100644 --- a/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h +++ b/modules/robot/include/visp3/robot/vpRobotWireFrameSimulator.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Basic class used to make robot simulators. - * -*****************************************************************************/ + */ #ifndef vpRobotWireFrameSimulator_HH #define vpRobotWireFrameSimulator_HH /*! - \file vpRobotWireFrameSimulator.h - \brief Basic class used to make robot simulators. -*/ + * \file vpRobotWireFrameSimulator.h + * \brief Basic class used to make robot simulators. + */ #include @@ -67,22 +65,22 @@ #include /*! - \class vpRobotWireFrameSimulator - - \ingroup group_robot_simu_gantry group_robot_simu_arm - - \brief This class aims to be a basis used to create all the - simulators of robots. 
- - Thus in this class you will find all the parameters and methods - which are necessary to create a simulator. Several methods are pure - virtual. In this case it means that they are specific to the each - robot, for example the computation of the geometrical model. - - \warning This class uses threading capabilities. Thus on Unix-like - platforms, the libpthread third-party library need to be - installed. On Windows, we use the native threading capabilities. -*/ + * \class vpRobotWireFrameSimulator + * + * \ingroup group_robot_simu_gantry group_robot_simu_arm + * + * \brief This class aims to be a basis used to create all the + * simulators of robots. + * + * Thus in this class you will find all the parameters and methods + * which are necessary to create a simulator. Several methods are pure + * virtual. In this case it means that they are specific to the each + * robot, for example the computation of the geometrical model. + * + * \warning This class uses threading capabilities. Thus on Unix-like + * platforms, the libpthread third-party library need to be + * installed. On Windows, we use the native threading capabilities. + */ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, public vpRobotSimulator { public: @@ -178,15 +176,14 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu public: vpRobotWireFrameSimulator(); explicit vpRobotWireFrameSimulator(bool display); - virtual ~vpRobotWireFrameSimulator(); /** @name Inherited functionalities from vpRobotWireFrameSimulator */ //@{ /*! - Get the parameters of the virtual external camera. - - \return It returns the camera parameters. - */ + * Get the parameters of the virtual external camera. + * + * \return It returns the camera parameters. + */ vpCameraParameters getExternalCameraParameters() const { // if(px_ext != 1 && py_ext != 1) @@ -200,12 +197,12 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu } } /*! 
- Get the external camera's position relative to the the world reference - frame. - - \return the main external camera position relative to the the world - reference frame. - */ + * Get the external camera's position relative to the the world reference + * frame. + * + * \return the main external camera position relative to the the world + * reference frame. + */ vpHomogeneousMatrix getExternalCameraPosition() const { return this->vpWireFrameSimulator::getExternalCameraPosition(); @@ -216,10 +213,10 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu vpHomogeneousMatrix get_cMo(); /*! - Get the pose between the object and the fixed world frame. - - \return The pose between the object and the fixed world frame. - */ + * Get the pose between the object and the fixed world frame. + * + * \return The pose between the object and the fixed world frame. + */ vpHomogeneousMatrix get_fMo() const { return fMo; } /* Display functions */ @@ -229,106 +226,108 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu void initScene(const char *obj); /*! - Set the color used to display the camera in the external view. - - \param col : The desired color. - */ + * Set the color used to display the camera in the external view. + * + * \param col : The desired color. + */ void setCameraColor(const vpColor &col) { camColor = col; } /*! - Set the flag used to force the sampling time in the thread computing the - robot's displacement to a constant value; see setSamplingTime(). It may be - useful if the main thread (computing the features) is very time consuming. - False by default. - - \param _constantSamplingTimeMode : The new value of the - constantSamplingTimeMode flag. - */ + * Set the flag used to force the sampling time in the thread computing the + * robot's displacement to a constant value; see setSamplingTime(). It may be + * useful if the main thread (computing the features) is very time consuming. + * False by default. 
+ * + * \param _constantSamplingTimeMode : The new value of the + * constantSamplingTimeMode flag. + */ inline void setConstantSamplingTimeMode(const bool _constantSamplingTimeMode) { constantSamplingTimeMode = _constantSamplingTimeMode; } /*! - Set the color used to display the object at the current position in the - robot's camera view. - - \param col : The desired color. - */ + * Set the color used to display the object at the current position in the + * robot's camera view. + * + * \param col : The desired color. + */ void setCurrentViewColor(const vpColor &col) { curColor = col; } /*! - Set the color used to display the object at the desired position in the - robot's camera view. - - \param col : The desired color. - */ + * Set the color used to display the object at the desired position in the + * robot's camera view. + * + * \param col : The desired color. + */ void setDesiredViewColor(const vpColor &col) { desColor = col; } /*! - Set the desired position of the robot's camera relative to the object. - - \param cdMo_ : The desired pose of the camera. - */ + * Set the desired position of the robot's camera relative to the object. + * + * \param cdMo_ : The desired pose of the camera. + */ void setDesiredCameraPosition(const vpHomogeneousMatrix &cdMo_) { this->vpWireFrameSimulator::setDesiredCameraPosition(cdMo_); } /*! - Set the way to draw the robot. Depending on what you choose you can - display a 3D wireframe model or a set of lines linking the frames used to - compute the geometrical model. - - \param dispType : Type of display. Can be MODEL_3D or MODEL_DH. - */ + * Set the way to draw the robot. Depending on what you choose you can + * display a 3D wireframe model or a set of lines linking the frames used to + * compute the geometrical model. + * + * \param dispType : Type of display. Can be MODEL_3D or MODEL_DH. + */ inline void setDisplayRobotType(const vpDisplayRobotType dispType) { displayType = dispType; } /*! 
- Set the external camera point of view. - - \param camMf_ : The pose of the external camera relative to the world - reference frame. - */ + * Set the external camera point of view. + * + * \param camMf_ : The pose of the external camera relative to the world + * reference frame. + */ void setExternalCameraPosition(const vpHomogeneousMatrix &camMf_) { this->vpWireFrameSimulator::setExternalCameraPosition(camMf_); } /*! - Specify the thickness of the graphics drawings. - */ + * Specify the thickness of the graphics drawings. + */ void setGraphicsThickness(unsigned int thickness) { this->thickness_ = thickness; } /*! - Set the sampling time. - - \param delta_t : Sampling time in second used to compute the robot - displacement from the velocity applied to the robot during this time. - - Since the wireframe simulator is threaded, the sampling time is set to - vpTime::getMinTimeForUsleepCall() / 1000 seconds. - - */ + * Set the sampling time. + * + * \param delta_t : Sampling time in second used to compute the robot + * displacement from the velocity applied to the robot during this time. + * + * Since the wireframe simulator is threaded, the sampling time is set to + * vpTime::getMinTimeForUsleepCall() / 1000 seconds. + */ inline void setSamplingTime(const double &delta_t) { if (delta_t < static_cast(vpTime::getMinTimeForUsleepCall() * 1e-3)) { this->delta_t_ = static_cast(vpTime::getMinTimeForUsleepCall() * 1e-3); - } else { + } + else { this->delta_t_ = delta_t; } } - /*! Set the parameter which enable or disable the singularity mangement */ + /*! + * Set the parameter which enable or disable the singularity management. + */ void setSingularityManagement(bool sm) { singularityManagement = sm; } /*! - Activates extra printings when the robot reaches joint limits... - */ + * Activates extra printings when the robot reaches joint limits... + */ void setVerbose(bool verbose) { this->verbose_ = verbose; } /*! - Set the pose between the object and the fixed world frame. 
- - \param fMo_ : The pose between the object and the fixed world frame. - */ + * Set the pose between the object and the fixed world frame. + * + * \param fMo_ : The pose between the object and the fixed world frame. + */ void set_fMo(const vpHomogeneousMatrix &fMo_) { this->fMo = fMo_; } //@} @@ -337,8 +336,8 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu */ //@{ /*! - Function used to launch the thread which moves the robot. -*/ + * Function used to launch the thread which moves the robot. + */ #if defined(_WIN32) static DWORD WINAPI launcher(LPVOID lpParam) { @@ -354,8 +353,6 @@ class VISP_EXPORT vpRobotWireFrameSimulator : protected vpWireFrameSimulator, pu } #endif - /* Robot functions */ - void init() { ; } /*! Method lauched by the thread to compute the position of the robot in the * articular frame. */ virtual void updateArticularPosition() = 0; diff --git a/modules/robot/include/visp3/robot/vpSimulatorAfma6.h b/modules/robot/include/visp3/robot/vpSimulatorAfma6.h index 739b76911e..3a38780376 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorAfma6.h +++ b/modules/robot/include/visp3/robot/vpSimulatorAfma6.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Class which provides a simulator for the robot Afma6. - * -*****************************************************************************/ + */ #ifndef vpSimulatorAfma6_HH #define vpSimulatorAfma6_HH /*! - \file vpSimulatorAfma6.h - \brief Class which provides a simulator for the robot Afma6. -*/ + * \file vpSimulatorAfma6.h + * \brief Class which provides a simulator for the robot Afma6. + */ #include #include @@ -49,128 +47,126 @@ #if defined(VISP_HAVE_MODULE_GUI) && ((defined(_WIN32) && !defined(WINRT_8_0)) || defined(VISP_HAVE_PTHREAD)) /*! 
- \class vpSimulatorAfma6 - - \ingroup group_robot_simu_gantry - - \brief Simulator of Irisa's gantry robot named Afma6. - - Implementation of the vpRobotWireFrameSimulator class in order to simulate - Irisa's Afma6 robot. This robot is a gantry robot with six degrees of freedom - manufactured in 1992 by the french Afma-Robots company. - - \warning This class uses threading capabilities. Thus on Unix-like - platforms, the libpthread third-party library need to be - installed. On Windows, we use the native threading capabilities. - - This class allows to control the Afma6 gantry robot in position - and velocity: - - in the joint space (vpRobot::ARTICULAR_FRAME), - - in the fixed reference frame (vpRobot::REFERENCE_FRAME), - - in the camera frame (vpRobot::CAMERA_FRAME), - - or in a mixed frame (vpRobot::MIXT_FRAME) where translations are expressed - in the reference frame and rotations in the camera frame. - - End-effector frame (vpRobot::END_EFFECTOR_FRAME) is not implemented. - - All the translations are expressed in meters for positions and m/s - for the velocities. Rotations are expressed in radians for the - positions, and rad/s for the rotation velocities. - - The direct and inverse kinematics models are implemented in the - vpAfma6 class. 
- - To control the robot in position, you may set the controller - to position control and then send the position to reach in a specific - frame like here in the joint space: - - \code -#include -#include -#include -#include - -int main() -{ - vpSimulatorAfma6 robot; - - robot.init(vpAfma6::TOOL_CCMOP, vpCameraParameters::perspectiveProjWithoutDistortion); - - vpColVector q(6); - // Set a joint position - q[0] = 0.1; // Joint 1 position, in meter - q[1] = 0.2; // Joint 2 position, in meter - q[2] = 0.3; // Joint 3 position, in meter - q[3] = M_PI/8; // Joint 4 position, in rad - q[4] = M_PI/4; // Joint 5 position, in rad - q[5] = M_PI; // Joint 6 position, in rad - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); - - // Moves the robot in the joint space - robot.setPosition(vpRobot::ARTICULAR_FRAME, q); - - return 0; -} - \endcode - - To control the robot in velocity, you may set the controller to - velocity control and then send the velocities. To end the velocity - control and stop the robot you have to set the controller to the - stop state. Here is an example of a velocity control in the joint - space: - - \code -#include -#include -#include - -int main() -{ - vpSimulatorAfma6 robot; - - robot.init(vpAfma6::TOOL_GRIPPER, vpCameraParameters::perspectiveProjWithoutDistortion); - - vpColVector qvel(6); - // Set a joint velocity - qvel[0] = 0.1; // Joint 1 velocity in m/s - qvel[1] = 0.1; // Joint 2 velocity in m/s - qvel[2] = 0.1; // Joint 3 velocity in m/s - qvel[3] = M_PI/8; // Joint 4 velocity in rad/s - qvel[4] = 0; // Joint 5 velocity in rad/s - qvel[5] = 0; // Joint 6 velocity in rad/s - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - - for ( ; ; ) { - // Apply a velocity in the joint space - robot.setVelocity(vpRobot::ARTICULAR_FRAME, qvel); - - // Compute new velocities qvel... 
- } - - // Stop the robot - robot.setRobotState(vpRobot::STATE_STOP); - - return 0; -} - \endcode - - It is also possible to measure the robot current position with - getPosition() method and the robot current velocities with the getVelocity() - method. - - For convenience, there is also the ability to read/write joint - positions from a position file with readPosFile() and savePosFile() - methods. - - To know how this class can be used to achieve a visual servoing simulation, - you can follow the \ref tutorial-ibvs. - -*/ - + * \class vpSimulatorAfma6 + * + * \ingroup group_robot_simu_gantry + * + * \brief Simulator of Irisa's gantry robot named Afma6. + * + * Implementation of the vpRobotWireFrameSimulator class in order to simulate + * Irisa's Afma6 robot. This robot is a gantry robot with six degrees of freedom + * manufactured in 1992 by the french Afma-Robots company. + * + * \warning This class uses threading capabilities. Thus on Unix-like + * platforms, the libpthread third-party library need to be + * installed. On Windows, we use the native threading capabilities. + * + * This class allows to control the Afma6 gantry robot in position + * and velocity: + * - in the joint space (vpRobot::ARTICULAR_FRAME), + * - in the fixed reference frame (vpRobot::REFERENCE_FRAME), + * - in the camera frame (vpRobot::CAMERA_FRAME), + * - or in a mixed frame (vpRobot::MIXT_FRAME) where translations are expressed + * in the reference frame and rotations in the camera frame. + * + * End-effector frame (vpRobot::END_EFFECTOR_FRAME) is not implemented. + * + * All the translations are expressed in meters for positions and m/s + * for the velocities. Rotations are expressed in radians for the + * positions, and rad/s for the rotation velocities. + * + * The direct and inverse kinematics models are implemented in the + * vpAfma6 class. 
+ * + * To control the robot in position, you may set the controller + * to position control and then send the position to reach in a specific + * frame like here in the joint space: + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorAfma6 robot; + * + * robot.init(vpAfma6::TOOL_CCMOP, vpCameraParameters::perspectiveProjWithoutDistortion); + * + * vpColVector q(6); + * // Set a joint position + * q[0] = 0.1; // Joint 1 position, in meter + * q[1] = 0.2; // Joint 2 position, in meter + * q[2] = 0.3; // Joint 3 position, in meter + * q[3] = M_PI/8; // Joint 4 position, in rad + * q[4] = M_PI/4; // Joint 5 position, in rad + * q[5] = M_PI; // Joint 6 position, in rad + * + * // Initialize the controller to position control + * robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); + * + * // Moves the robot in the joint space + * robot.setPosition(vpRobot::ARTICULAR_FRAME, q); + * + * return 0; + * } + * \endcode + * + * To control the robot in velocity, you may set the controller to + * velocity control and then send the velocities. To end the velocity + * control and stop the robot you have to set the controller to the + * stop state. 
Here is an example of a velocity control in the joint + * space: + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorAfma6 robot; + * + * robot.init(vpAfma6::TOOL_GRIPPER, vpCameraParameters::perspectiveProjWithoutDistortion); + * + * vpColVector qvel(6); + * // Set a joint velocity + * qvel[0] = 0.1; // Joint 1 velocity in m/s + * qvel[1] = 0.1; // Joint 2 velocity in m/s + * qvel[2] = 0.1; // Joint 3 velocity in m/s + * qvel[3] = M_PI/8; // Joint 4 velocity in rad/s + * qvel[4] = 0; // Joint 5 velocity in rad/s + * qvel[5] = 0; // Joint 6 velocity in rad/s + * + * // Initialize the controller to position control + * robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); + * + * for ( ; ; ) { + * // Apply a velocity in the joint space + * robot.setVelocity(vpRobot::ARTICULAR_FRAME, qvel); + * + * // Compute new velocities qvel... + * } + * + * // Stop the robot + * robot.setRobotState(vpRobot::STATE_STOP); + * + * return 0; + * } + * \endcode + * + * It is also possible to measure the robot current position with + * getPosition() method and the robot current velocities with the getVelocity() + * method. + * + * For convenience, there is also the ability to read/write joint + * positions from a position file with readPosFile() and savePosFile() + * methods. + * + * To know how this class can be used to achieve a visual servoing simulation, + * you can follow the \ref tutorial-ibvs. 
+ */ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vpAfma6 { public: @@ -191,13 +187,13 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp public: vpSimulatorAfma6(); explicit vpSimulatorAfma6(bool display); - virtual ~vpSimulatorAfma6(); + virtual ~vpSimulatorAfma6() override; void getCameraParameters(vpCameraParameters &cam, const unsigned int &image_width, const unsigned int &image_height); void getCameraParameters(vpCameraParameters &cam, const vpImage &I); void getCameraParameters(vpCameraParameters &cam, const vpImage &I); - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &displacement); - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &displacement) override; + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q, double ×tamp); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position, double ×tamp); @@ -209,12 +205,12 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp void get_cMe(vpHomogeneousMatrix &cMe); void get_cVe(vpVelocityTwistMatrix &cVe); - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void - init(vpAfma6::vpAfma6ToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpAfma6::vpAfma6ToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); bool initialiseCameraRelativeToObject(const vpHomogeneousMatrix &cMo); void initialiseObjectRelativeToCamera(const vpHomogeneousMatrix 
&cMo); @@ -225,26 +221,26 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp void setCameraParameters(const vpCameraParameters &cam); void setJointLimit(const vpColVector &limitMin, const vpColVector &limitMax); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const char *filename); void setPositioningVelocity(double vel) { positioningVelocity = vel; } bool setPosition(const vpHomogeneousMatrix &cdMo, vpImage *Iint = NULL, const double &errMax = 0.001); - vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState); + vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState) override; - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); protected: /** @name Protected Member Functions Inherited from vpSimulatorAfma6 */ //@{ - void computeArticularVelocity(); + void computeArticularVelocity() override; void compute_fMi(); void findHighestPositioningSpeed(vpColVector &q); void getExternalImage(vpImage &I); - inline void get_fMi(vpHomogeneousMatrix *fMit) + inline void get_fMi(vpHomogeneousMatrix *fMit) override { m_mutex_fMi.lock(); for (int i = 0; i < 8; i++) { @@ -253,12 +249,12 @@ class VISP_EXPORT vpSimulatorAfma6 : public vpRobotWireFrameSimulator, public vp m_mutex_fMi.unlock(); } - void init(); - void initArms(); + void init() override; + void initArms() override; void initDisplay(); - int isInJointLimit(void); + int isInJointLimit() override; bool singularityTest(const vpColVector &q, vpMatrix &J); - void updateArticularPosition(); + void 
updateArticularPosition() override; //@} }; diff --git a/modules/robot/include/visp3/robot/vpSimulatorCamera.h b/modules/robot/include/visp3/robot/vpSimulatorCamera.h index 451c5c2289..a3f474f4e3 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorCamera.h +++ b/modules/robot/include/visp3/robot/vpSimulatorCamera.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Defines the simplest robot : a free flying camera. - * -*****************************************************************************/ + */ #ifndef vpSimulatorCamera_H #define vpSimulatorCamera_H /*! - \file vpSimulatorCamera.h - \brief class that defines the simplest robot : a free flying camera -*/ + * \file vpSimulatorCamera.h + * \brief class that defines the simplest robot : a free flying camera + */ #include #include @@ -48,59 +46,59 @@ #include /*! - \class vpSimulatorCamera - - \ingroup group_robot_simu_Camera - - \brief Class that defines the simplest robot: a free flying camera. - - This free flying camera has 6 dof; 3 in translation and 3 in rotation. - It evolves as a gentry robot with respect to a world frame. This class - is similar to vpRobotCamera class except that here the position of the robot - is provided as the transformation from world frame to camera frame; wMc. -This representation is more intuitive than the one implemented in -vpRobotCamera where the transformation from camera to world frame is -considered; cMw. - - For this particular simulated robot, the end-effector and camera frame are -confused. That means that the cMe transformation is equal to identity. - - The robot jacobian expressed in the end-effector frame - \f$ {^e}{\bf J}_e \f$ is also set to identity (see get_eJe()). - - The following code shows how to control this robot in position and velocity. 
- \code -#include - -int main() -{ - vpHomogeneousMatrix wMc; - vpSimulatorCamera robot; - - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; - - wMc[2][3] = 1.; // Camera frame is 1 meter along z axis in front of the world frame - robot.setPosition(wMc); // Set the new position of the camera in the world frame - std::cout << "New position of the camera in the world frame wMc:\n" << wMc << std::endl; - - robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second - robot.setMaxTranslationVelocity(1.); // vx, vy and vz max set to 1 m/s - robot.setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s - - vpColVector v(6); - v = 0; - v[2] = 1.; // set v_z to 1 m/s - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - // The robot has moved from 0.1 meters along the z axis - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "New position of the camera wMc:\n" << wMc << std::endl; -} - \endcode - - To know how this class can be used to achieve a visual servoing simulation, - you can follow the \ref tutorial-ibvs. -*/ + * \class vpSimulatorCamera + * + * \ingroup group_robot_simu_Camera + * + * \brief Class that defines the simplest robot: a free flying camera. + * + * This free flying camera has 6 dof; 3 in translation and 3 in rotation. + * It evolves as a gentry robot with respect to a world frame. This class + * is similar to vpRobotCamera class except that here the position of the robot + * is provided as the transformation from world frame to camera frame; wMc. + * This representation is more intuitive than the one implemented in + * vpRobotCamera where the transformation from camera to world frame is + * considered; cMw. + * + * For this particular simulated robot, the end-effector and camera frame are + * confused. That means that the cMe transformation is equal to identity. 
+ * + * The robot jacobian expressed in the end-effector frame + * \f$ {^e}{\bf J}_e \f$ is also set to identity (see get_eJe()). + * + * The following code shows how to control this robot in position and velocity. + * \code + * #include + * + * int main() + * { + * vpHomogeneousMatrix wMc; + * vpSimulatorCamera robot; + * + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; + * + * wMc[2][3] = 1.; // Camera frame is 1 meter along z axis in front of the world frame + * robot.setPosition(wMc); // Set the new position of the camera in the world frame + * std::cout << "New position of the camera in the world frame wMc:\n" << wMc << std::endl; + * + * robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second + * robot.setMaxTranslationVelocity(1.); // vx, vy and vz max set to 1 m/s + * robot.setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s + * + * vpColVector v(6); + * v = 0; + * v[2] = 1.; // set v_z to 1 m/s + * robot.setVelocity(vpRobot::CAMERA_FRAME, v); + * // The robot has moved from 0.1 meters along the z axis + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "New position of the camera wMc:\n" << wMc << std::endl; + * } + * \endcode + * + * To know how this class can be used to achieve a visual servoing simulation, + * you can follow the \ref tutorial-ibvs. 
+ */ class VISP_EXPORT vpSimulatorCamera : public vpRobotSimulator { protected: @@ -108,28 +106,27 @@ class VISP_EXPORT vpSimulatorCamera : public vpRobotSimulator public: vpSimulatorCamera(); - virtual ~vpSimulatorCamera(); public: /** @name Inherited functionalities from vpSimulatorCamera */ //@{ void get_cVe(vpVelocityTwistMatrix &cVe) const; - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; vpHomogeneousMatrix getPosition() const; void getPosition(vpHomogeneousMatrix &wMc) const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; void setPosition(const vpHomogeneousMatrix &wMc); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; //@} private: - void init(); + void init() override; // Non implemented virtual pure functions - void get_fJe(vpMatrix & /*_fJe */){}; - void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */){}; - void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */){}; + void get_fJe(vpMatrix & /*_fJe */) override { }; + void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */) override { }; + void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */) override { }; }; #endif diff --git a/modules/robot/include/visp3/robot/vpSimulatorPioneer.h b/modules/robot/include/visp3/robot/vpSimulatorPioneer.h index 947fcb29be..e58064e504 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorPioneer.h +++ b/modules/robot/include/visp3/robot/vpSimulatorPioneer.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. 
All rights reserved. * @@ -30,17 +29,16 @@ * * Description: * Pioneer mobile robot simulator without display. - * -*****************************************************************************/ + */ #ifndef vpSimulatorPioneer_H #define vpSimulatorPioneer_H /*! - \file vpSimulatorPioneer.h - \brief class that defines the Pioneer mobile robot simulator equipped with a - static camera. -*/ + * \file vpSimulatorPioneer.h + * \brief class that defines the Pioneer mobile robot simulator equipped with a + * static camera. + */ #include #include @@ -50,53 +48,52 @@ #include /*! - \class vpSimulatorPioneer - - \ingroup group_robot_simu_unicycle - - \brief Class that defines the Pioneer mobile robot simulator equipped with a -static camera. - - It intends to simulate the mobile robot described in vpPioneer class. - This robot has 2 dof: \f$(v_x, w_z)\f$, the translational and - rotational velocities that are applied at point E. - - The robot position evolves with respect to a world frame; wMc. When a new -joint velocity is applied to the robot using setVelocity(), the position of -the camera wrt the world frame is updated. - - \image html pioneer.png - - The following code shows how to control this robot in position and velocity. 
- \code -#include - -int main() -{ - vpHomogeneousMatrix wMc; - vpSimulatorPioneer robot; - - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; - - robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second - robot.setMaxTranslationVelocity(1.); // vx max set to 1 m/s - robot.setMaxRotationVelocity(vpMath::rad(90)); // wz max set to 90 deg/s - - vpColVector v(2); // we control vx and wz dof - v = 0; - v[0] = 1.; // set vx to 1 m/s - robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); - // The robot has moved from 0.1 meters along the z axis - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "New position of the camera wMc:\n" << wMc << std::endl; -} - \endcode - - The usage of this class is also highlighted in \ref -tutorial-simu-robot-pioneer. - -*/ + * \class vpSimulatorPioneer + * + * \ingroup group_robot_simu_unicycle + * + * \brief Class that defines the Pioneer mobile robot simulator equipped with a + * static camera. + * + * It intends to simulate the mobile robot described in vpPioneer class. + * This robot has 2 dof: \f$(v_x, w_z)\f$, the translational and + * rotational velocities that are applied at point E. + * + * The robot position evolves with respect to a world frame; wMc. When a new + * joint velocity is applied to the robot using setVelocity(), the position of + * the camera wrt the world frame is updated. + * + * \image html pioneer.png + * + * The following code shows how to control this robot in position and velocity. 
+ * \code + * #include + * + * int main() + * { + * vpHomogeneousMatrix wMc; + * vpSimulatorPioneer robot; + * + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; + * + * robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second + * robot.setMaxTranslationVelocity(1.); // vx max set to 1 m/s + * robot.setMaxRotationVelocity(vpMath::rad(90)); // wz max set to 90 deg/s + * + * vpColVector v(2); // we control vx and wz dof + * v = 0; + * v[0] = 1.; // set vx to 1 m/s + * robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); + * // The robot has moved from 0.1 meters along the z axis + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "New position of the camera wMc:\n" << wMc << std::endl; + * } + * \endcode + * + * The usage of this class is also highlighted in \ref + * tutorial-simu-robot-pioneer. + */ class VISP_EXPORT vpSimulatorPioneer : public vpPioneer, public vpRobotSimulator { @@ -114,25 +111,24 @@ class VISP_EXPORT vpSimulatorPioneer : public vpPioneer, public vpRobotSimulator public: vpSimulatorPioneer(); - virtual ~vpSimulatorPioneer(); public: /** @name Inherited functionalities from vpSimulatorPioneer */ //@{ - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; void getPosition(vpHomogeneousMatrix &wMc) const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; //@} private: - void init(); + void init() override; // Non implemented virtual pure functions - void get_fJe(vpMatrix & /*_fJe */){}; - void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q 
*/){}; - void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */){}; + void get_fJe(vpMatrix & /*_fJe */) override { }; + void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */) override { }; + void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */) override { }; }; #endif diff --git a/modules/robot/include/visp3/robot/vpSimulatorPioneerPan.h b/modules/robot/include/visp3/robot/vpSimulatorPioneerPan.h index 21cd005b00..25c8e2ac59 100644 --- a/modules/robot/include/visp3/robot/vpSimulatorPioneerPan.h +++ b/modules/robot/include/visp3/robot/vpSimulatorPioneerPan.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,16 @@ * * Description: * Pioneer mobile robot equipped with a pan head simulator without display. - * -*****************************************************************************/ + */ #ifndef vpSimulatorPioneerPan_H #define vpSimulatorPioneerPan_H /*! - \file vpSimulatorPioneerPan.h - \brief class that defines the Pioneer mobile robot simulator equipped - with a camera able to move in pan. -*/ + * \file vpSimulatorPioneerPan.h + * \brief class that defines the Pioneer mobile robot simulator equipped + * with a camera able to move in pan. + */ #include #include @@ -50,61 +48,59 @@ #include /*! - \class vpSimulatorPioneerPan - - \ingroup group_robot_simu_unicycle - - \brief Class that defines the Pioneer mobile robot simulator equipped - with a camera able to move in pan. - - It intends to simulate the mobile robot described in vpPioneerPan class. - This robot has 3 dof: \f$(v_x, w_z, \dot{q_1})\f$, the translational and - rotational velocities of the mobile platform, the pan head velocity -respectively. - - The robot position evolves with respect to a world frame; wMc. 
When a new -joint velocity is applied to the robot using setVelocity(), the position of -the camera wrt the world frame is updated. - - \image html pioneer-pan.png - - The following code shows how to control this robot in position and velocity. - \code -#include - -int main() -{ - vpHomogeneousMatrix wMc; - vpSimulatorPioneerPan robot; - - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; - - robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second - robot.setMaxTranslationVelocity(1.); // vx max set to 1 m/s - robot.setMaxRotationVelocity(vpMath::rad(90)); // wz max set to 90 deg/s - - vpColVector v(3); // we control vx, wz and q_pan - v = 0; - v[0] = 1.; // set vx to 1 m/s - robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); - // The robot has moved from 0.1 meters along the z axis - robot.getPosition(wMc); // Position of the camera in the world frame - std::cout << "New position of the camera wMc:\n" << wMc << std::endl; -} - \endcode - - The usage of this class is also highlighted in \ref -tutorial-simu-robot-pioneer. - -*/ + * \class vpSimulatorPioneerPan + * + * \ingroup group_robot_simu_unicycle + * + * \brief Class that defines the Pioneer mobile robot simulator equipped + * with a camera able to move in pan. + * + * It intends to simulate the mobile robot described in vpPioneerPan class. + * This robot has 3 dof: \f$(v_x, w_z, \dot{q_1})\f$, the translational and + * rotational velocities of the mobile platform, the pan head velocity + * respectively. + * + * The robot position evolves with respect to a world frame; wMc. When a new + * joint velocity is applied to the robot using setVelocity(), the position of + * the camera wrt the world frame is updated. + * + * \image html pioneer-pan.png + * + * The following code shows how to control this robot in position and velocity. 
+ * \code + * #include + * + * int main() + * { + * vpHomogeneousMatrix wMc; + * vpSimulatorPioneerPan robot; + * + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "Default position of the camera in the world frame wMc:\n" << wMc << std::endl; + * + * robot.setSamplingTime(0.100); // Modify the default sampling time to 0.1 second + * robot.setMaxTranslationVelocity(1.); // vx max set to 1 m/s + * robot.setMaxRotationVelocity(vpMath::rad(90)); // wz max set to 90 deg/s + * + * vpColVector v(3); // we control vx, wz and q_pan + * v = 0; + * v[0] = 1.; // set vx to 1 m/s + * robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); + * // The robot has moved from 0.1 meters along the z axis + * robot.getPosition(wMc); // Position of the camera in the world frame + * std::cout << "New position of the camera wMc:\n" << wMc << std::endl; + * } + * \endcode + * + * The usage of this class is also highlighted in \ref tutorial-simu-robot-pioneer. + */ class VISP_EXPORT vpSimulatorPioneerPan : public vpPioneerPan, public vpRobotSimulator { protected: //! 
robot / camera location in the world frame vpHomogeneousMatrix wMc_; // world to camera - vpHomogeneousMatrix wMm_; // world to mobile robot frame located between the two weels + vpHomogeneousMatrix wMm_; // world to mobile robot frame located between the two wheels // mMp_ mobile robot to pan frame is a protected member of vpPioneerPan // pMe_ pan head to end effector frame is a protected member of vpPioneerPan // cMe_ is a protected member of vpUnicycle @@ -116,25 +112,24 @@ class VISP_EXPORT vpSimulatorPioneerPan : public vpPioneerPan, public vpRobotSim public: vpSimulatorPioneerPan(); - virtual ~vpSimulatorPioneerPan(); public: /** @name Inherited functionalities from vpSimulatorPioneerPan */ //@{ - void get_eJe(vpMatrix &eJe); + void get_eJe(vpMatrix &eJe) override; void getPosition(vpHomogeneousMatrix &wMc) const; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) override; //@} private: - void init(); + void init() override; // Non implemented virtual pure functions - void get_fJe(vpMatrix & /*_fJe */){}; - void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */){}; - void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */){}; + void get_fJe(vpMatrix & /*_fJe */) override { }; + void getDisplacement(const vpRobot::vpControlFrameType /* frame */, vpColVector & /* q */) override { }; + void setPosition(const vpRobot::vpControlFrameType /* frame */, const vpColVector & /* q */) override { }; }; #endif diff --git a/modules/robot/include/visp3/robot/vpSimulatorViper850.h b/modules/robot/include/visp3/robot/vpSimulatorViper850.h index 95aec1dbfa..627b4dd7ab 100644 --- 
a/modules/robot/include/visp3/robot/vpSimulatorViper850.h +++ b/modules/robot/include/visp3/robot/vpSimulatorViper850.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Class which provides a simulator for the robot Viper850. - * -*****************************************************************************/ + */ #ifndef vpSimulatorViper850_HH #define vpSimulatorViper850_HH /*! - \file vpSimulatorViper850.h - \brief Class which provides a simulator for the robot Viper850.. -*/ + * \file vpSimulatorViper850.h + * \brief Class which provides a simulator for the robot Viper850.. + */ #include #if defined(VISP_HAVE_MODULE_GUI) && ((defined(_WIN32) && !defined(WINRT_8_0)) || defined(VISP_HAVE_PTHREAD)) @@ -49,153 +47,151 @@ #include /*! - \class vpSimulatorViper850 - - \ingroup group_robot_simu_arm - - \brief Simulator of Irisa's Viper S850 robot named Viper850. - - Implementation of the vpRobotWireFrameSimulator class in order to simulate - Irisa's Viper850 robot. This robot is an ADEPT six degrees of freedom arm. - - \warning This class uses threading capabilities. Thus on Unix-like - platforms, the libpthread third-party library need to be - installed. On Windows, we use the native threading capabilities. - - This class allows to control the Viper850 arm robot in position - and velocity: - - in the joint space (vpRobot::ARTICULAR_FRAME), - - in the fixed reference frame (vpRobot::REFERENCE_FRAME), - - in the camera frame (vpRobot::CAMERA_FRAME), - - or in a mixed frame (vpRobot::MIXT_FRAME) where translations are expressed - in the reference frame and rotations in the camera frame. - - End-effector frame (vpRobot::END_EFFECTOR_FRAME) is not implemented. - - All the translations are expressed in meters for positions and m/s - for the velocities. 
Rotations are expressed in radians for the - positions, and rad/s for the rotation velocities. - - The direct and inverse kinematics models are implemented in the - vpViper850 class. - - To control the robot in position, you may set the controller - to position control and then send the position to reach in a specific - frame like here in the joint space: - - \code -#include -#include -#include -#include - -int main() -{ - vpSimulatorViper850 robot; - - vpColVector q(6); - // Set a joint position - q[0] = vpMath::rad(10); // Joint 1 position, in rad - q[1] = 0.2; // Joint 2 position, in rad - q[2] = 0.3; // Joint 3 position, in rad - q[3] = M_PI/8; // Joint 4 position, in rad - q[4] = M_PI/4; // Joint 5 position, in rad - q[5] = M_PI; // Joint 6 position, in rad - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); - - // Moves the robot in the joint space - robot.setPosition(vpRobot::ARTICULAR_FRAME, q); - - return 0; -} - \endcode - - The robot moves to the specified position with the default - positioning velocity vpRobotViper850::defaultPositioningVelocity. The - setPositioningVelocity() method allows to change the maximal - velocity used to reach the desired position. - - \code -#include -#include -#include - -int main() -{ - vpSimulatorViper850 robot; - - vpColVector q(6); - // Set q[i] with i in [0:5] - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); - - // Set the max velocity to 40% - robot.setPositioningVelocity(40); - - // Moves the robot in the joint space - robot.setPosition(vpRobot::ARTICULAR_FRAME, q); - - return 0; -} - \endcode - - To control the robot in velocity, you may set the controller to - velocity control and then send the velocities. To end the velocity - control and stop the robot you have to set the controller to the - stop state. 
Here is an example of a velocity control in the joint - space: - - \code -#include -#include -#include - -int main() -{ - vpSimulatorViper850 robot; - - vpColVector qvel(6); - // Set a joint velocity - qvel[0] = 0.1; // Joint 1 velocity in rad/s - qvel[1] = vpMath::rad(15); // Joint 2 velocity in rad/s - qvel[2] = 0; // Joint 3 velocity in rad/s - qvel[3] = M_PI/8; // Joint 4 velocity in rad/s - qvel[4] = 0; // Joint 5 velocity in rad/s - qvel[5] = 0; // Joint 6 velocity in rad/s - - // Initialize the controller to position control - robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - - for ( ; ; ) { - // Apply a velocity in the joint space - robot.setVelocity(vpRobot::ARTICULAR_FRAME, qvel); - - // Compute new velocities qvel... - } - - // Stop the robot - robot.setRobotState(vpRobot::STATE_STOP); - - return 0; -} - \endcode - - It is also possible to measure the robot current position with - getPosition() method and the robot current velocities with the getVelocity() - method. - - For convenience, there is also the ability to read/write joint - positions from a position file with readPosFile() and savePosFile() - methods. - - To know how this class can be used to achieve a visual servoing simulation, - you can follow the \ref tutorial-ibvs. - -*/ - + * \class vpSimulatorViper850 + * + * \ingroup group_robot_simu_arm + * + * \brief Simulator of Irisa's Viper S850 robot named Viper850. + * + * Implementation of the vpRobotWireFrameSimulator class in order to simulate + * Irisa's Viper850 robot. This robot is an ADEPT six degrees of freedom arm. + * + * \warning This class uses threading capabilities. Thus on Unix-like + * platforms, the libpthread third-party library need to be + * installed. On Windows, we use the native threading capabilities. 
+ * + * This class allows to control the Viper850 arm robot in position + * and velocity: + * - in the joint space (vpRobot::ARTICULAR_FRAME), + * - in the fixed reference frame (vpRobot::REFERENCE_FRAME), + * - in the camera frame (vpRobot::CAMERA_FRAME), + * - or in a mixed frame (vpRobot::MIXT_FRAME) where translations are expressed + * in the reference frame and rotations in the camera frame. + * + * End-effector frame (vpRobot::END_EFFECTOR_FRAME) is not implemented. + * + * All the translations are expressed in meters for positions and m/s + * for the velocities. Rotations are expressed in radians for the + * positions, and rad/s for the rotation velocities. + * + * The direct and inverse kinematics models are implemented in the + * vpViper850 class. + * + * To control the robot in position, you may set the controller + * to position control and then send the position to reach in a specific + * frame like here in the joint space: + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorViper850 robot; + * + * vpColVector q(6); + * // Set a joint position + * q[0] = vpMath::rad(10); // Joint 1 position, in rad + * q[1] = 0.2; // Joint 2 position, in rad + * q[2] = 0.3; // Joint 3 position, in rad + * q[3] = M_PI/8; // Joint 4 position, in rad + * q[4] = M_PI/4; // Joint 5 position, in rad + * q[5] = M_PI; // Joint 6 position, in rad + * + * // Initialize the controller to position control + * robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); + * + * // Moves the robot in the joint space + * robot.setPosition(vpRobot::ARTICULAR_FRAME, q); + * + * return 0; + * } + * \endcode + * + * The robot moves to the specified position with the default + * positioning velocity vpRobotViper850::defaultPositioningVelocity. The + * setPositioningVelocity() method allows to change the maximal + * velocity used to reach the desired position. 
+ * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorViper850 robot; + * + * vpColVector q(6); + * // Set q[i] with i in [0:5] + * + * // Initialize the controller to position control + * robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); + * + * // Set the max velocity to 40% + * robot.setPositioningVelocity(40); + * + * // Moves the robot in the joint space + * robot.setPosition(vpRobot::ARTICULAR_FRAME, q); + * + * return 0; + * } + * \endcode + * + * To control the robot in velocity, you may set the controller to + * velocity control and then send the velocities. To end the velocity + * control and stop the robot you have to set the controller to the + * stop state. Here is an example of a velocity control in the joint + * space: + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpSimulatorViper850 robot; + * + * vpColVector qvel(6); + * // Set a joint velocity + * qvel[0] = 0.1; // Joint 1 velocity in rad/s + * qvel[1] = vpMath::rad(15); // Joint 2 velocity in rad/s + * qvel[2] = 0; // Joint 3 velocity in rad/s + * qvel[3] = M_PI/8; // Joint 4 velocity in rad/s + * qvel[4] = 0; // Joint 5 velocity in rad/s + * qvel[5] = 0; // Joint 6 velocity in rad/s + * + * // Initialize the controller to position control + * robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); + * + * for ( ; ; ) { + * // Apply a velocity in the joint space + * robot.setVelocity(vpRobot::ARTICULAR_FRAME, qvel); + * + * // Compute new velocities qvel... + * } + * + * // Stop the robot + * robot.setRobotState(vpRobot::STATE_STOP); + * + * return 0; + * } + * \endcode + * + * It is also possible to measure the robot current position with + * getPosition() method and the robot current velocities with the getVelocity() + * method. + * + * For convenience, there is also the ability to read/write joint + * positions from a position file with readPosFile() and savePosFile() + * methods. 
+ * + * To know how this class can be used to achieve a visual servoing simulation, + * you can follow the \ref tutorial-ibvs. + */ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public vpViper850 { public: @@ -216,15 +212,15 @@ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public public: vpSimulatorViper850(); explicit vpSimulatorViper850(bool display); - virtual ~vpSimulatorViper850(); + virtual ~vpSimulatorViper850() override; void getCameraParameters(vpCameraParameters &cam, const unsigned int &image_width, const unsigned int &image_height); void getCameraParameters(vpCameraParameters &cam, const vpImage &I); void getCameraParameters(vpCameraParameters &cam, const vpImage &I); - void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &displacement); + void getDisplacement(const vpRobot::vpControlFrameType frame, vpColVector &displacement) override; - void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q); + void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q) override; void getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q, double ×tamp); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position); void getPosition(const vpRobot::vpControlFrameType frame, vpPoseVector &position, double ×tamp); @@ -237,12 +233,12 @@ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public void get_cMe(vpHomogeneousMatrix &cMe); void get_cVe(vpVelocityTwistMatrix &cVe); - void get_eJe(vpMatrix &eJe); - void get_fJe(vpMatrix &fJe); + void get_eJe(vpMatrix &eJe) override; + void get_fJe(vpMatrix &fJe) override; void - init(vpViper850::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper850::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = 
vpCameraParameters::perspectiveProjWithoutDistortion); bool initialiseCameraRelativeToObject(const vpHomogeneousMatrix &cMo); void initialiseObjectRelativeToCamera(const vpHomogeneousMatrix &cMo); @@ -253,26 +249,26 @@ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public void setCameraParameters(const vpCameraParameters &cam); void setJointLimit(const vpColVector &limitMin, const vpColVector &limitMax); - void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q); + void setPosition(const vpRobot::vpControlFrameType frame, const vpColVector &q) override; void setPosition(const vpRobot::vpControlFrameType frame, double pos1, double pos2, double pos3, double pos4, double pos5, double pos6); void setPosition(const char *filename); void setPositioningVelocity(double vel) { positioningVelocity = vel; } - vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState); + vpRobot::vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState) override; - void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity); + void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &velocity) override; void stopMotion(); protected: /** @name Protected Member Functions Inherited from vpSimulatorViper850 */ //@{ - void computeArticularVelocity(); + void computeArticularVelocity() override; void compute_fMi(); void findHighestPositioningSpeed(vpColVector &q); void getExternalImage(vpImage &I); - inline void get_fMi(vpHomogeneousMatrix *fMit) + inline void get_fMi(vpHomogeneousMatrix *fMit) override { m_mutex_fMi.lock(); for (int i = 0; i < 8; i++) { @@ -280,12 +276,12 @@ class VISP_EXPORT vpSimulatorViper850 : public vpRobotWireFrameSimulator, public } m_mutex_fMi.unlock(); } - void init(); - void initArms(); + void init() override; + void initArms() override; void initDisplay(); - int isInJointLimit(void); + int isInJointLimit() override; bool 
singularityTest(const vpColVector &q, vpMatrix &J); - void updateArticularPosition(); + void updateArticularPosition() override; //@} }; diff --git a/modules/robot/include/visp3/robot/vpUnicycle.h b/modules/robot/include/visp3/robot/vpUnicycle.h index 9fa31000e1..840e00a8a3 100644 --- a/modules/robot/include/visp3/robot/vpUnicycle.h +++ b/modules/robot/include/visp3/robot/vpUnicycle.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Common features for unicycle mobile robots. - * -*****************************************************************************/ + */ #ifndef VPUNICYCLE_H #define VPUNICYCLE_H @@ -40,42 +38,35 @@ #include /*! - - \class vpUnicycle - - \ingroup group_robot_simu_unicycle - - \brief Generic functions for unicycle mobile robots. - - This class provides common features for unicycle mobile robots. - -*/ + * \class vpUnicycle + * + * \ingroup group_robot_simu_unicycle + * + * \brief Generic functions for unicycle mobile robots. + * + * This class provides common features for unicycle mobile robots. + */ class VISP_EXPORT vpUnicycle { public: /*! - Default constructor that does nothing. - */ - vpUnicycle() : cMe_(), eJe_(){}; - /*! - Destructor that does nothing. - */ - virtual ~vpUnicycle(){}; + * Default constructor that does nothing. + */ + vpUnicycle() : cMe_(), eJe_() { }; /** @name Inherited functionalities from vpUnicycle */ //@{ /*! - Return the transformation \f${^c}{\bf M}_e\f$ between the camera frame - and the mobile robot end effector frame. - */ + * Return the transformation \f${^c}{\bf M}_e\f$ between the camera frame + * and the mobile robot end effector frame. + */ vpHomogeneousMatrix get_cMe() const { return cMe_; } /*! - - Return the twist transformation from camera frame to the mobile robot - end effector frame. 
This transformation allows to compute a velocity - expressed in the end effector frame into the camera frame. - */ + * Return the twist transformation from camera frame to the mobile robot + * end effector frame. This transformation allows to compute a velocity + * expressed in the end effector frame into the camera frame. + */ vpVelocityTwistMatrix get_cVe() const { vpVelocityTwistMatrix cVe; @@ -84,39 +75,38 @@ class VISP_EXPORT vpUnicycle } /*! - - Return the twist transformation from camera frame to the mobile robot - end effector frame. This transformation allows to compute a velocity - expressed in the end effector frame into the camera frame. - - \sa get_cVe() - */ + * Return the twist transformation from camera frame to the mobile robot + * end effector frame. This transformation allows to compute a velocity + * expressed in the end effector frame into the camera frame. + * + * \sa get_cVe() + */ void get_cVe(vpVelocityTwistMatrix &cVe) const { cVe = get_cVe(); } /*! - Return the robot jacobian \f${^e}{\bf J}_e\f$ expressed in the end - effector frame. - - \return The robot jacobian such as \f${\bf v} = {^e}{\bf J}_e \; \dot{\bf - q}\f$ with \f$\dot{\bf q} = (v_x, w_z)\f$ the robot control velocities and - \f$\bf v\f$ the six dimention velocity skew. - */ + * Return the robot jacobian \f${^e}{\bf J}_e\f$ expressed in the end + * effector frame. + * + * \return The robot jacobian such as \f${\bf v} = {^e}{\bf J}_e \; \dot{\bf + * q}\f$ with \f$\dot{\bf q} = (v_x, w_z)\f$ the robot control velocities and + * \f$\bf v\f$ the six dimension velocity skew. + */ vpMatrix get_eJe() const { return eJe_; } /*! - Set the transformation between the camera frame and the end effector - frame. - */ + * Set the transformation between the camera frame and the end effector + * frame. + */ void set_cMe(const vpHomogeneousMatrix &cMe) { cMe_ = cMe; } /*! - Set the robot jacobian \f${^e}{\bf J}_e\f$ expressed in the end effector - frame. 
- - \param eJe : The robot jacobian to set such as \f${\bf v} = {^e}{\bf J}_e - \; \dot{\bf q}\f$ with \f$\dot{\bf q} = (v_x, w_z)\f$ the robot control - velocities and \f$\bf v\f$ the six dimention velocity skew. - */ + * Set the robot jacobian \f${^e}{\bf J}_e\f$ expressed in the end effector + * frame. + * + * \param eJe : The robot jacobian to set such as \f${\bf v} = {^e}{\bf J}_e + * \; \dot{\bf q}\f$ with \f$\dot{\bf q} = (v_x, w_z)\f$ the robot control + * velocities and \f$\bf v\f$ the six dimension velocity skew. + */ void set_eJe(const vpMatrix &eJe) { eJe_ = eJe; } //@} diff --git a/modules/robot/include/visp3/robot/vpViper.h b/modules/robot/include/visp3/robot/vpViper.h index e47f5e615c..465f81500a 100644 --- a/modules/robot/include/visp3/robot/vpViper.h +++ b/modules/robot/include/visp3/robot/vpViper.h @@ -40,7 +40,7 @@ \file vpViper.h - Modelisation of the ADEPT Viper 650 or 850 robot. + Modelization of the ADEPT Viper 650 or 850 robot. */ @@ -57,7 +57,7 @@ \ingroup group_robot_real_arm group_robot_simu_arm - \brief Modelisation of the ADEPT Viper robot + \brief Modelization of the ADEPT Viper robot This robot has six degrees of freedom. The model of the robot is the following: \image html model-viper.png Model of the Viper 850 robot. diff --git a/modules/robot/include/visp3/robot/vpViper650.h b/modules/robot/include/visp3/robot/vpViper650.h index c4eedb66a3..ab2082ae04 100644 --- a/modules/robot/include/visp3/robot/vpViper650.h +++ b/modules/robot/include/visp3/robot/vpViper650.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,72 +29,66 @@ * * Description: * Interface for the ADEPT Viper 650 robot. - * -*****************************************************************************/ + */ #ifndef vpViper650_h #define vpViper650_h -/*! 
- - \file vpViper650.h - - Modelisation of the ADEPT Viper 650 robot. - -*/ +#include /*! + * \file vpViper650.h + * + * Modelization of the ADEPT Viper 650 robot. + */ - \class vpViper650 - - \ingroup group_robot_real_arm - - \brief Modelisation of the ADEPT Viper 650 robot. - - The model of the robot is the following: - \image html model-viper.png Model of the Viper 650 robot. - - The non modified Denavit-Hartenberg representation of the robot is - given in the table below, where \f$q_1^*, \ldots, q_6^*\f$ - are the variable joint positions. - - \f[ - \begin{tabular}{|c|c|c|c|c|} - \hline - Joint & $a_i$ & $d_i$ & $\alpha_i$ & $\theta_i$ \\ - \hline - 1 & $a_1$ & $d_1$ & $-\pi/2$ & $q_1^*$ \\ - 2 & $a_2$ & 0 & 0 & $q_2^*$ \\ - 3 & $a_3$ & 0 & $-\pi/2$ & $q_3^* - \pi$ \\ - 4 & 0 & $d_4$ & $\pi/2$ & $q_4^*$ \\ - 5 & 0 & 0 & $-\pi/2$ & $q_5^*$ \\ - 6 & 0 & 0 & 0 & $q_6^*-\pi$ \\ - 7 & 0 & $d_6$ & 0 & 0 \\ - \hline - \end{tabular} - \f] - - In this modelization, different frames have to be considered. - - - \f$ {\cal F}_f \f$: the reference frame, also called world frame - - - \f$ {\cal F}_w \f$: the wrist frame located at the intersection of - the last three rotations, with \f$ ^f{\bf M}_w = ^0{\bf M}_6 \f$ - - - \f$ {\cal F}_e \f$: the end-effector frame located at the interface of the - two tool changers, with \f$^f{\bf M}_e = 0{\bf M}_7 \f$ - - - \f$ {\cal F}_c \f$: the camera or tool frame, with \f$^f{\bf M}_c = ^f{\bf - M}_e \; ^e{\bf M}_c \f$ where \f$ ^e{\bf M}_c \f$ is the result of - a calibration stage. We can also consider a custom tool TOOL_CUSTOM and - set this tool during robot initialisation or using set_eMc(). - - - \f$ {\cal F}_s \f$: the force/torque sensor frame, with \f$d7=0.0666\f$. - -*/ - -#include - +/*! + * \class vpViper650 + * + * \ingroup group_robot_real_arm + * + * \brief Modelization of the ADEPT Viper 650 robot. + * + * The model of the robot is the following: + * \image html model-viper.png Model of the Viper 650 robot. 
+ * + * The non modified Denavit-Hartenberg representation of the robot is + * given in the table below, where \f$q_1^*, \ldots, q_6^*\f$ + * are the variable joint positions. + * + * \f[ + * \begin{tabular}{|c|c|c|c|c|} + * \hline + * Joint & $a_i$ & $d_i$ & $\alpha_i$ & $\theta_i$ \\ + * \hline + * 1 & $a_1$ & $d_1$ & $-\pi/2$ & $q_1^*$ \\ + * 2 & $a_2$ & 0 & 0 & $q_2^*$ \\ + * 3 & $a_3$ & 0 & $-\pi/2$ & $q_3^* - \pi$ \\ + * 4 & 0 & $d_4$ & $\pi/2$ & $q_4^*$ \\ + * 5 & 0 & 0 & $-\pi/2$ & $q_5^*$ \\ + * 6 & 0 & 0 & 0 & $q_6^*-\pi$ \\ + * 7 & 0 & $d_6$ & 0 & 0 \\ + * \hline + * \end{tabular} + * \f] + * + * In this modelization, different frames have to be considered. + * + * - \f$ {\cal F}_f \f$: the reference frame, also called world frame + * + * - \f$ {\cal F}_w \f$: the wrist frame located at the intersection of + * the last three rotations, with \f$ ^f{\bf M}_w = ^0{\bf M}_6 \f$ + * + * - \f$ {\cal F}_e \f$: the end-effector frame located at the interface of the + * two tool changers, with \f$^f{\bf M}_e = 0{\bf M}_7 \f$ + * + * - \f$ {\cal F}_c \f$: the camera or tool frame, with \f$^f{\bf M}_c = ^f{\bf + * M}_e \; ^e{\bf M}_c \f$ where \f$ ^e{\bf M}_c \f$ is the result of + * a calibration stage. We can also consider a custom tool TOOL_CUSTOM and + * set this tool during robot initialisation or using set_eMc(). + * + * - \f$ {\cal F}_s \f$: the force/torque sensor frame, with \f$d7=0.0666\f$. + */ class VISP_EXPORT vpViper650 : public vpViper { public: @@ -121,7 +114,8 @@ class VISP_EXPORT vpViper650 : public vpViper static const char *const CONST_GENERIC_CAMERA_NAME; //! List of possible tools that can be attached to the robot end-effector. - typedef enum { + typedef enum + { TOOL_MARLIN_F033C_CAMERA, /*!< Marlin F033C camera. */ TOOL_PTGREY_FLEA2_CAMERA, /*!< Point Grey Flea 2 camera. */ TOOL_SCHUNK_GRIPPER_CAMERA, /*!< Camera attached to the Schunk gripper. 
*/ @@ -133,15 +127,14 @@ class VISP_EXPORT vpViper650 : public vpViper static const vpToolType defaultTool; vpViper650(); - virtual ~vpViper650(){}; /** @name Inherited functionalities from vpViper650 */ //@{ void init(void); void init(const std::string &camera_extrinsic_parameters); void - init(vpViper650::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper650::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void init(vpViper650::vpToolType tool, const std::string &filename); void init(vpViper650::vpToolType tool, const vpHomogeneousMatrix &eMc_); diff --git a/modules/robot/include/visp3/robot/vpViper850.h b/modules/robot/include/visp3/robot/vpViper850.h index 0c94fd44c9..2a04d80c5a 100644 --- a/modules/robot/include/visp3/robot/vpViper850.h +++ b/modules/robot/include/visp3/robot/vpViper850.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,73 +29,68 @@ * * Description: * Interface for the ADEPT Viper 850 robot. - * -*****************************************************************************/ + */ #ifndef vpViper850_h #define vpViper850_h -/*! - - \file vpViper850.h - - Modelisation of the ADEPT Viper 850 robot. - -*/ - -/*! - - \class vpViper850 - - \ingroup group_robot_real_arm group_robot_simu_arm - - \brief Modelisation of the ADEPT Viper 850 robot. - - The model of the robot is the following: - \image html model-viper.png Model of the Viper 850 robot. - - The non modified Denavit-Hartenberg representation of the robot is - given in the table below, where \f$q_1^*, \ldots, q_6^*\f$ - are the variable joint positions. 
- - \f[ - \begin{tabular}{|c|c|c|c|c|} - \hline - Joint & $a_i$ & $d_i$ & $\alpha_i$ & $\theta_i$ \\ - \hline - 1 & $a_1$ & $d_1$ & $-\pi/2$ & $q_1^*$ \\ - 2 & $a_2$ & 0 & 0 & $q_2^*$ \\ - 3 & $a_3$ & 0 & $-\pi/2$ & $q_3^* - \pi$ \\ - 4 & 0 & $d_4$ & $\pi/2$ & $q_4^*$ \\ - 5 & 0 & 0 & $-\pi/2$ & $q_5^*$ \\ - 6 & 0 & 0 & 0 & $q_6^*-\pi$ \\ - 7 & 0 & $d_6$ & 0 & 0 \\ - \hline - \end{tabular} - \f] - - In this modelization, different frames have to be considered. - - - \f$ {\cal F}_f \f$: the reference frame, also called world frame - - - \f$ {\cal F}_w \f$: the wrist frame located at the intersection of - the last three rotations, with \f$ ^f{\bf M}_w = ^0{\bf M}_6 \f$ - - - \f$ {\cal F}_e \f$: the end-effector frame located at the interface of the - two tool changers, with \f$^f{\bf M}_e = 0{\bf M}_7 \f$ - - - \f$ {\cal F}_c \f$: the camera or tool frame, with \f$^f{\bf M}_c = ^f{\bf - M}_e \; ^e{\bf M}_c \f$ where \f$ ^e{\bf M}_c \f$ is the result of - a calibration stage. We can also consider a custom tool TOOL_CUSTOM and - set this during robot initialisation or using set_eMc(). - - - \f$ {\cal F}_s \f$: the force/torque sensor frame, with \f$d7=0.0666\f$. - -*/ #include #include +/*! + * \file vpViper850.h + * + * Modelization of the ADEPT Viper 850 robot. + */ + +/*! + * \class vpViper850 + * + * \ingroup group_robot_real_arm group_robot_simu_arm + * + * \brief Modelization of the ADEPT Viper 850 robot. + * + * The model of the robot is the following: + * \image html model-viper.png Model of the Viper 850 robot. + * + * The non modified Denavit-Hartenberg representation of the robot is + * given in the table below, where \f$q_1^*, \ldots, q_6^*\f$ + * are the variable joint positions. 
+ * + * \f[ + * \begin{tabular}{|c|c|c|c|c|} + * \hline + * Joint & $a_i$ & $d_i$ & $\alpha_i$ & $\theta_i$ \\ + * \hline + * 1 & $a_1$ & $d_1$ & $-\pi/2$ & $q_1^*$ \\ + * 2 & $a_2$ & 0 & 0 & $q_2^*$ \\ + * 3 & $a_3$ & 0 & $-\pi/2$ & $q_3^* - \pi$ \\ + * 4 & 0 & $d_4$ & $\pi/2$ & $q_4^*$ \\ + * 5 & 0 & 0 & $-\pi/2$ & $q_5^*$ \\ + * 6 & 0 & 0 & 0 & $q_6^*-\pi$ \\ + * 7 & 0 & $d_6$ & 0 & 0 \\ + * \hline + * \end{tabular} + * \f] + * + * In this modelization, different frames have to be considered. + * + * - \f$ {\cal F}_f \f$: the reference frame, also called world frame + * + * - \f$ {\cal F}_w \f$: the wrist frame located at the intersection of + * the last three rotations, with \f$ ^f{\bf M}_w = ^0{\bf M}_6 \f$ + * + * - \f$ {\cal F}_e \f$: the end-effector frame located at the interface of the + * two tool changers, with \f$^f{\bf M}_e = 0{\bf M}_7 \f$ + * + * - \f$ {\cal F}_c \f$: the camera or tool frame, with \f$^f{\bf M}_c = ^f{\bf + * M}_e \; ^e{\bf M}_c \f$ where \f$ ^e{\bf M}_c \f$ is the result of + * a calibration stage. We can also consider a custom tool TOOL_CUSTOM and + * set this during robot initialisation or using set_eMc(). + * + * - \f$ {\cal F}_s \f$: the force/torque sensor frame, with \f$d7=0.0666\f$. + */ class VISP_EXPORT vpViper850 : public vpViper { public: @@ -122,7 +116,8 @@ class VISP_EXPORT vpViper850 : public vpViper static const char *const CONST_GENERIC_CAMERA_NAME; //! List of possible tools that can be attached to the robot end-effector. - typedef enum { + typedef enum + { TOOL_MARLIN_F033C_CAMERA, /*!< Marlin F033C camera. */ TOOL_PTGREY_FLEA2_CAMERA, /*!< Point Grey Flea 2 camera. */ TOOL_SCHUNK_GRIPPER_CAMERA, /*!< Camera attached to the Schunk gripper. 
*/ @@ -134,15 +129,14 @@ class VISP_EXPORT vpViper850 : public vpViper static const vpToolType defaultTool; vpViper850(); - virtual ~vpViper850(){}; /** @name Inherited functionalities from vpViper850 */ //@{ void init(void); void init(const std::string &camera_extrinsic_parameters); void - init(vpViper850::vpToolType tool, - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); + init(vpViper850::vpToolType tool, + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithoutDistortion); void init(vpViper850::vpToolType tool, const std::string &filename); void init(vpViper850::vpToolType tool, const vpHomogeneousMatrix &eMc_); diff --git a/modules/robot/src/haptic-device/qbdevice/vpQbSoftHand.cpp b/modules/robot/src/haptic-device/qbdevice/vpQbSoftHand.cpp index 8ddb8c1c63..5c8aca7159 100644 --- a/modules/robot/src/haptic-device/qbdevice/vpQbSoftHand.cpp +++ b/modules/robot/src/haptic-device/qbdevice/vpQbSoftHand.cpp @@ -44,13 +44,7 @@ * Default constructor that does nothing. * To connect to a device call init(). */ -vpQbSoftHand::vpQbSoftHand() : vpQbDevice() {} - -/** - * Close all the still open serial ports. - * \sa close() - */ -vpQbSoftHand::~vpQbSoftHand() {} +vpQbSoftHand::vpQbSoftHand() : vpQbDevice() { } /** * Retrieve the motor currents of the given device. @@ -129,7 +123,8 @@ void vpQbSoftHand::setPosition(const vpColVector &position, const int &id) if (commands[0] < position_limits[0]) { commands[0] = position_limits[0]; - } else if (commands[0] > position_limits[1]) { + } + else if (commands[0] > position_limits[1]) { commands[0] = position_limits[1]; } @@ -171,13 +166,15 @@ void vpQbSoftHand::setPosition(const vpColVector &position, double speed_factor, double vel = speed_factor; if (vel < 0.01) { vel = 0.01; - } else if (vel > 1.) { + } + else if (vel > 1.) 
{ vel = 1.0; } double current_factor = stiffness; if (current_factor < 0.0) { current_factor = 0.0; - } else if (current_factor > 1.) { + } + else if (current_factor > 1.) { current_factor = 1.0; } double slope = sign * max_slope * vel; @@ -189,7 +186,8 @@ void vpQbSoftHand::setPosition(const vpColVector &position, double speed_factor, q[0] = q_mes[0] + slope * delta_t / 1000.0 * i; if (q[0] < getPositionLimits()[0]) { q[0] = getPositionLimits()[0]; - } else if (q[0] > getPositionLimits()[1]) { + } + else if (q[0] > getPositionLimits()[1]) { q[0] = getPositionLimits()[1]; } setPosition(q, id); @@ -198,7 +196,8 @@ void vpQbSoftHand::setPosition(const vpColVector &position, double speed_factor, if (std::fabs(current[0]) > current_factor * current_max) { current_failures++; - } else { + } + else { current_failures = 0; } diff --git a/modules/robot/src/real-robot/viper/vpViper.cpp b/modules/robot/src/real-robot/viper/vpViper.cpp index 0019e7fc1e..edcb72aedf 100644 --- a/modules/robot/src/real-robot/viper/vpViper.cpp +++ b/modules/robot/src/real-robot/viper/vpViper.cpp @@ -37,7 +37,7 @@ \file vpViper.cpp - Modelisation of the ADEPT Viper 650 or 850 robot. + Modelization of the ADEPT Viper 650 or 850 robot. */ diff --git a/modules/robot/src/real-robot/viper/vpViper650.cpp b/modules/robot/src/real-robot/viper/vpViper650.cpp index 4557434552..704de86e7d 100644 --- a/modules/robot/src/real-robot/viper/vpViper650.cpp +++ b/modules/robot/src/real-robot/viper/vpViper650.cpp @@ -37,7 +37,7 @@ \file vpViper650.cpp - Modelisation of the ADEPT Viper 650 robot. + Modelization of the ADEPT Viper 650 robot. */ diff --git a/modules/robot/src/real-robot/viper/vpViper850.cpp b/modules/robot/src/real-robot/viper/vpViper850.cpp index b0d612fe16..53eac839cf 100644 --- a/modules/robot/src/real-robot/viper/vpViper850.cpp +++ b/modules/robot/src/real-robot/viper/vpViper850.cpp @@ -37,7 +37,7 @@ \file vpViper850.cpp - Modelisation of the ADEPT Viper 850 robot. 
+ Modelization of the ADEPT Viper 850 robot. */ diff --git a/modules/robot/src/robot-simulator/vpRobotCamera.cpp b/modules/robot/src/robot-simulator/vpRobotCamera.cpp index 0f1f3954fd..e5ac10c3d1 100644 --- a/modules/robot/src/robot-simulator/vpRobotCamera.cpp +++ b/modules/robot/src/robot-simulator/vpRobotCamera.cpp @@ -89,12 +89,6 @@ void vpRobotCamera::init() setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s } -/*! - Destructor. - -*/ -vpRobotCamera::~vpRobotCamera() {} - /*! Get the twist transformation from camera frame to end-effector @@ -256,5 +250,5 @@ void vpRobotCamera::setPosition(const vpHomogeneousMatrix &cMw) #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_robot.a(vpRobotCamera.cpp.o) has no // symbols -void dummy_vpRobotCamera(){}; +void dummy_vpRobotCamera() { }; #endif diff --git a/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp b/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp index 5fc26d3a2b..e64efb01a9 100644 --- a/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp +++ b/modules/robot/src/robot-simulator/vpRobotWireFrameSimulator.cpp @@ -48,19 +48,19 @@ */ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator() : vpWireFrameSimulator(), vpRobotSimulator(), I(), tcur(0), tprev(0), robotArms(NULL), size_fMi(8), fMi(NULL), - artCoord(), artVel(), velocity(), + artCoord(), artVel(), velocity(), #if defined(_WIN32) #elif defined(VISP_HAVE_PTHREAD) - thread(), attr(), + thread(), attr(), #endif - m_mutex_fMi(), m_mutex_eMc(), m_mutex_artVel(), m_mutex_artCoord(), m_mutex_velocity(), m_mutex_display(), m_mutex_robotStop(), m_mutex_frame(), m_mutex_setVelocityCalled(), m_mutex_scene(), - displayBusy(false), - robotStop(false), jointLimit(false), jointLimitArt(false), singularityManagement(true), cameraParam(), + m_mutex_fMi(), m_mutex_eMc(), m_mutex_artVel(), m_mutex_artCoord(), m_mutex_velocity(), m_mutex_display(), m_mutex_robotStop(), 
m_mutex_frame(), m_mutex_setVelocityCalled(), m_mutex_scene(), + displayBusy(false), + robotStop(false), jointLimit(false), jointLimitArt(false), singularityManagement(true), cameraParam(), #if defined(VISP_HAVE_DISPLAY) - display(), + display(), #endif - displayType(MODEL_3D), displayAllowed(true), constantSamplingTimeMode(false), setVelocityCalled(false), - verbose_(false) + displayType(MODEL_3D), displayAllowed(true), constantSamplingTimeMode(false), setVelocityCalled(false), + verbose_(false) { setSamplingTime(0.010); velocity.resize(6); @@ -77,19 +77,19 @@ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator() */ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator(bool do_display) : vpWireFrameSimulator(), vpRobotSimulator(), I(), tcur(0), tprev(0), robotArms(NULL), size_fMi(8), fMi(NULL), - artCoord(), artVel(), velocity(), + artCoord(), artVel(), velocity(), #if defined(_WIN32) #elif defined(VISP_HAVE_PTHREAD) - thread(), attr(), + thread(), attr(), #endif - m_mutex_fMi(), m_mutex_eMc(), m_mutex_artVel(), m_mutex_artCoord(), m_mutex_velocity(), m_mutex_display(), m_mutex_robotStop(), m_mutex_frame(), m_mutex_setVelocityCalled(), m_mutex_scene(), - displayBusy(false), robotStop(false), jointLimit(false), jointLimitArt(false), singularityManagement(true), - cameraParam(), + m_mutex_fMi(), m_mutex_eMc(), m_mutex_artVel(), m_mutex_artCoord(), m_mutex_velocity(), m_mutex_display(), m_mutex_robotStop(), m_mutex_frame(), m_mutex_setVelocityCalled(), m_mutex_scene(), + displayBusy(false), robotStop(false), jointLimit(false), jointLimitArt(false), singularityManagement(true), + cameraParam(), #if defined(VISP_HAVE_DISPLAY) - display(), + display(), #endif - displayType(MODEL_3D), displayAllowed(do_display), constantSamplingTimeMode(false), setVelocityCalled(false), - verbose_(false) + displayType(MODEL_3D), displayAllowed(do_display), constantSamplingTimeMode(false), setVelocityCalled(false), + verbose_(false) { setSamplingTime(0.010); velocity.resize(6); @@ -102,13 
+102,6 @@ vpRobotWireFrameSimulator::vpRobotWireFrameSimulator(bool do_display) #endif } -/*! - Basic destructor -*/ -vpRobotWireFrameSimulator::~vpRobotWireFrameSimulator() -{ -} - /*! Initialize the display. It enables to choose the type of scene which will be used to display the object at the current position and at the desired @@ -231,7 +224,8 @@ void vpRobotWireFrameSimulator::getInternalView(vpImage &I_) (std::fabs(py_int - 1) > vpMath::maximum(py_int, 1.) * std::numeric_limits::epsilon())) { u = (double)I_.getWidth() / (2 * px_int); v = (double)I_.getHeight() / (2 * py_int); - } else { + } + else { u = (double)I_.getWidth() / (vpMath::minimum(I_.getWidth(), I_.getHeight())); v = (double)I_.getHeight() / (vpMath::minimum(I_.getWidth(), I_.getHeight())); } @@ -304,7 +298,8 @@ void vpRobotWireFrameSimulator::getInternalView(vpImage &I_) (std::fabs(py_int - 1) > vpMath::maximum(py_int, 1.) * std::numeric_limits::epsilon())) { u = (double)I.getWidth() / (2 * px_int); v = (double)I.getHeight() / (2 * py_int); - } else { + } + else { u = (double)I_.getWidth() / (vpMath::minimum(I_.getWidth(), I_.getHeight())); v = (double)I_.getHeight() / (vpMath::minimum(I_.getWidth(), I_.getHeight())); } @@ -371,5 +366,5 @@ vpHomogeneousMatrix vpRobotWireFrameSimulator::get_cMo() #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: // libvisp_robot.a(vpRobotWireFrameSimulator.cpp.o) has no symbols -void dummy_vpRobotWireFrameSimulator(){}; +void dummy_vpRobotWireFrameSimulator() { }; #endif diff --git a/modules/robot/src/robot-simulator/vpSimulatorCamera.cpp b/modules/robot/src/robot-simulator/vpSimulatorCamera.cpp index f12d2a370b..889449e6da 100644 --- a/modules/robot/src/robot-simulator/vpSimulatorCamera.cpp +++ b/modules/robot/src/robot-simulator/vpSimulatorCamera.cpp @@ -72,12 +72,6 @@ void vpSimulatorCamera::init() setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s } -/*! - Destructor. 
- -*/ -vpSimulatorCamera::~vpSimulatorCamera() {} - /*! Get the twist transformation from camera frame to end-effector diff --git a/modules/robot/src/robot-simulator/vpSimulatorPioneer.cpp b/modules/robot/src/robot-simulator/vpSimulatorPioneer.cpp index 9193c9dd71..54368f734e 100644 --- a/modules/robot/src/robot-simulator/vpSimulatorPioneer.cpp +++ b/modules/robot/src/robot-simulator/vpSimulatorPioneer.cpp @@ -75,12 +75,6 @@ void vpSimulatorPioneer::init() wMc_ = wMe_ * cMe_.inverse(); } -/*! - Destructor. - -*/ -vpSimulatorPioneer::~vpSimulatorPioneer() {} - /*! Get the robot jacobian expressed in the end-effector frame. The jacobian expression is given in vpPioneer class. @@ -196,8 +190,8 @@ void vpSimulatorPioneer::getPosition(const vpRobot::vpControlFrameType frame, vp case vpRobot::ARTICULAR_FRAME: std::cout << "ARTICULAR_FRAME is not implemented in " - "vpSimulatorPioneer::getPosition()" - << std::endl; + "vpSimulatorPioneer::getPosition()" + << std::endl; break; case vpRobot::REFERENCE_FRAME: { // Convert wMc_ to a position diff --git a/modules/robot/src/robot-simulator/vpSimulatorPioneerPan.cpp b/modules/robot/src/robot-simulator/vpSimulatorPioneerPan.cpp index 316db9559d..a7f9f7c73f 100644 --- a/modules/robot/src/robot-simulator/vpSimulatorPioneerPan.cpp +++ b/modules/robot/src/robot-simulator/vpSimulatorPioneerPan.cpp @@ -83,12 +83,6 @@ void vpSimulatorPioneerPan::init() wMc_ = wMm_ * mMp_ * pMe_ * cMe_.inverse(); } -/*! - Destructor. - -*/ -vpSimulatorPioneerPan::~vpSimulatorPioneerPan() {} - /*! Get the robot jacobian expressed in the end-effector frame. The jacobian expression is given in vpPioneerPan class. 
@@ -211,8 +205,8 @@ void vpSimulatorPioneerPan::getPosition(const vpRobot::vpControlFrameType frame, case vpRobot::ARTICULAR_FRAME: std::cout << "ARTICULAR_FRAME is not implemented in " - "vpSimulatorPioneer::getPosition()" - << std::endl; + "vpSimulatorPioneer::getPosition()" + << std::endl; break; case vpRobot::REFERENCE_FRAME: { // Convert wMc_ to a position diff --git a/modules/sensor/include/visp3/sensor/vpForceTorqueAtiNetFTSensor.h b/modules/sensor/include/visp3/sensor/vpForceTorqueAtiNetFTSensor.h index ac40b94b53..80edb39c38 100755 --- a/modules/sensor/include/visp3/sensor/vpForceTorqueAtiNetFTSensor.h +++ b/modules/sensor/include/visp3/sensor/vpForceTorqueAtiNetFTSensor.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * ATI Force torque interface. - * -*****************************************************************************/ + */ #ifndef _vpForceTorqueAtiNetFTSensor_h_ #define _vpForceTorqueAtiNetFTSensor_h_ @@ -48,64 +46,63 @@ #ifdef VISP_HAVE_FUNC_INET_NTOP /*! - \class vpForceTorqueAtiNetFTSensor - - \ingroup group_sensor_ft - - Interface for ATI force/torque sensor using [Net F/T](https://www.ati-ia.com/products/ft/ft_NetFT.aspx) over UDP. - - The Network Force/Torque (Net F/T) sensor system measures six components of force and torque (Fx, Fy, Fz, Tx, Ty, Tz). - The Net F/T provides an EtherNet/IP communication interface and is compatible with standard Ethernet. The Net - F/T system is available with any of ATI transducer models. The Net F/T's web browser interface allows for easy - configuration and set up via the Ethernet connection present on all NetBox models. - - This class was tested with ATI Nano 43 F/T sensor connected to a NetBox. To use this class, you don't need to install - any specific third-party. 
- - To use this class, connect an Ethernet cable to the NetBox. The default IP address of the Net F/T is: 192.168.1.1. - The default Ethernet port is 49152. - You can use your favorite web browser on http://192.168.1.1 to modify Net F/T sensor settings and select sensor - calibration configuration. - - The following example shows how to use this class to get F/T measurements. - \code -#include - -#include - -int main(int argc, char **argv) -{ - vpForceTorqueAtiNetFTSensor ati_net_ft("192.168.1.1", 49152); - - ati_net_ft.startStreaming(); - ati_net_ft.bias(); - - while (1) { - double t = vpTime::measureTimeMs(); - if (ati_net_ft.waitForNewData()) { - vpColVector ft = ati_net_ft.getForceTorque(); - std::cout << "F/T: " << ft.t() << std::endl; - } - std::cout << "Loop time: " << vpTime::measureTimeMs() - t << " ms" << std::endl; - } -} - \endcode - - It produces the following output: - \code -F/T: -0.00150018 0.0030764 -0.00791356 -8.22294e-06 4.18799e-05 1.078288e-05 -Loop time: 0.03393554688 ms -... - \endcode - where 3 first values are forces Fx, Fy, Fz in N and the 3 last are torques Tx, Ty, Tz in Nm. - -*/ + * \class vpForceTorqueAtiNetFTSensor + * + * \ingroup group_sensor_ft + * + * Interface for ATI force/torque sensor using [Net F/T](https://www.ati-ia.com/products/ft/ft_NetFT.aspx) over UDP. + * + * The Network Force/Torque (Net F/T) sensor system measures six components of force and torque (Fx, Fy, Fz, Tx, Ty, Tz). + * The Net F/T provides an EtherNet/IP communication interface and is compatible with standard Ethernet. The Net + * F/T system is available with any of ATI transducer models. The Net F/T's web browser interface allows for easy + * configuration and set up via the Ethernet connection present on all NetBox models. + * + * This class was tested with ATI Nano 43 F/T sensor connected to a NetBox. To use this class, you don't need to install + * any specific third-party. + * + * To use this class, connect an Ethernet cable to the NetBox. 
The default IP address of the Net F/T is: 192.168.1.1. + * The default Ethernet port is 49152. + * You can use your favorite web browser on http://192.168.1.1 to modify Net F/T sensor settings and select sensor + * calibration configuration. + * + * The following example shows how to use this class to get F/T measurements. + * \code + * #include + * + * #include + * + * int main(int argc, char **argv) + * { + * vpForceTorqueAtiNetFTSensor ati_net_ft("192.168.1.1", 49152); + * + * ati_net_ft.startStreaming(); + * ati_net_ft.bias(); + * + * while (1) { + * double t = vpTime::measureTimeMs(); + * if (ati_net_ft.waitForNewData()) { + * vpColVector ft = ati_net_ft.getForceTorque(); + * std::cout << "F/T: " << ft.t() << std::endl; + * } + * std::cout << "Loop time: " << vpTime::measureTimeMs() - t << " ms" << std::endl; + * } + * } + * \endcode + * + * It produces the following output: + * \code + * F/T: -0.00150018 0.0030764 -0.00791356 -8.22294e-06 4.18799e-05 1.078288e-05 + * Loop time: 0.03393554688 ms + * ... + * \endcode + * where 3 first values are forces Fx, Fy, Fz in N and the 3 last are torques Tx, Ty, Tz in Nm. + */ class VISP_EXPORT vpForceTorqueAtiNetFTSensor : public vpUDPClient { public: vpForceTorqueAtiNetFTSensor(); vpForceTorqueAtiNetFTSensor(const std::string &hostname, int port); - virtual ~vpForceTorqueAtiNetFTSensor(); + virtual ~vpForceTorqueAtiNetFTSensor() override; void bias(unsigned int n_counts = 50); /*! diff --git a/modules/sensor/include/visp3/sensor/vpForceTorqueAtiSensor.h b/modules/sensor/include/visp3/sensor/vpForceTorqueAtiSensor.h index 84e9634664..de7033e568 100644 --- a/modules/sensor/include/visp3/sensor/vpForceTorqueAtiSensor.h +++ b/modules/sensor/include/visp3/sensor/vpForceTorqueAtiSensor.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. 
* @@ -30,8 +29,7 @@ * * Description: * ATI Force torque interface. - * -*****************************************************************************/ + */ #ifndef _vpForceTorqueAtiSensor_h_ #define _vpForceTorqueAtiSensor_h_ @@ -45,57 +43,56 @@ #include /*! - \class vpForceTorqueAtiSensor - - \ingroup group_sensor_ft - - Interface for ATI force/torque sensor. This class works only under -linux-like OS. It requires Comedi 3rd party. Installation instructions are -provided here https://visp.inria.fr/3rd_comedi. - - Comedi is the linux control and measurement device interface. For more -information see http://www.comedi.org. - - This class was tested with ATI Gamma 65-SI FT sensor connected to a - National Instrument NI DAQmx PCI-6220 board. - - Synchronous F/T data acquisition is performed using getForceTorque(). The -call to the function blocks until the whole acquisition has finished. - - The following example shows how to get single measures from an ATI F/T -device each 10 ms (100 Hz). -\code -#include -#include - -int main(int argc, char** argv) -{ - vpForceTorqueAtiSensor ati; - ati.setCalibrationFile("FT12345.cal"); - ati.open(); - ati.bias(); - for(unsigned int i=0; i < 20; i++) { - std::cout << "F/T: " << ati.getForceTorque().t() << std::endl; - vpTime::wait(10); - } - ati.close(); -#endif -} - \endcode - -*/ + * \class vpForceTorqueAtiSensor + * + * \ingroup group_sensor_ft + * + * Interface for ATI force/torque sensor. This class works only under + * linux-like OS. It requires Comedi 3rd party. Installation instructions are + * provided here https://visp.inria.fr/3rd_comedi. + * + * Comedi is the linux control and measurement device interface. For more + * information see http://www.comedi.org. + * + * This class was tested with ATI Gamma 65-SI FT sensor connected to a + * National Instrument NI DAQmx PCI-6220 board. + * + * Synchronous F/T data acquisition is performed using getForceTorque(). 
The + * call to the function blocks until the whole acquisition has finished. + * + * The following example shows how to get single measures from an ATI F/T + * device each 10 ms (100 Hz). + * \code + * #include + * #include + * + * int main(int argc, char** argv) + * { + * vpForceTorqueAtiSensor ati; + * ati.setCalibrationFile("FT12345.cal"); + * ati.open(); + * ati.bias(); + * for(unsigned int i=0; i < 20; i++) { + * std::cout << "F/T: " << ati.getForceTorque().t() << std::endl; + * vpTime::wait(10); + * } + * ati.close(); + * #endif + * } + * \endcode + */ class VISP_EXPORT vpForceTorqueAtiSensor : public vpComedi { public: vpForceTorqueAtiSensor(); - virtual ~vpForceTorqueAtiSensor(); + virtual ~vpForceTorqueAtiSensor() override; void bias(); void close(); /*! - Return the calibration file location specified using - setCalibrationFile(). \sa setCalibrationFile() + * Return the calibration file location specified using + * setCalibrationFile(). \sa setCalibrationFile() */ std::string getCalibrationFile() const { return m_calibfile; } vpColVector getForceTorque() const; diff --git a/modules/sensor/include/visp3/sensor/vpSickLDMRS.h b/modules/sensor/include/visp3/sensor/vpSickLDMRS.h index 84a71ccdec..de42458366 100644 --- a/modules/sensor/include/visp3/sensor/vpSickLDMRS.h +++ b/modules/sensor/include/visp3/sensor/vpSickLDMRS.h @@ -51,77 +51,79 @@ #include /*! - - \file vpSickLDMRS.h - - \brief Driver for the Sick LD-MRS laser scanner. -*/ + * \file vpSickLDMRS.h + * + * \brief Driver for the Sick LD-MRS laser scanner. + */ /*! - - \class vpSickLDMRS - - \ingroup group_sensor_laserscanner - - \brief Driver for the Sick LD-MRS laser scanner. - - \warning For the moment, this driver works only on UNIX platform. - - The code below shows how the four laser scan provided by the Sick - LD-MRS could be acquired. 
- - \code -#include "visp3/sensor/vpSickLDMRS.h" - -int main() -{ -#if !defined(_WIN32) && (defined(__unix__) || defined(__unix) || -(defined(__APPLE__) && defined(__MACH__))) // UNIX std::string ip = -"131.254.12.119"; - - vpSickLDMRS laser; - laser.setIpAddress(ip); - laser.setup(); - - vpLaserScan laserscan[4]; - for ( ; ; ) { - // Get the measured points in the four layers - laser.measure(laserscan); - - // Prints all the measured points - for (int layer=0; layer<4; layer++) { - std::vector pointsInLayer = laserscan[layer].getScanPoints(); vpScanPoint p; - - for (unsigned int i=0; i < pointsInLayer.size(); i++) { - std::cout << pointsInLayer[i] << std::endl; - } - } - } -#endif -} - \endcode -*/ + * \class vpSickLDMRS + * + * \ingroup group_sensor_laserscanner + * + * \brief Driver for the Sick LD-MRS laser scanner. + * + * \warning For the moment, this driver works only on UNIX platform. + * + * The code below shows how the four laser scan provided by the Sick + * LD-MRS could be acquired. 
+ * + * \code + * #include "visp3/sensor/vpSickLDMRS.h" + * + * int main() + * { + * #if !defined(_WIN32) && (defined(__unix__) || defined(__unix) || + * (defined(__APPLE__) && defined(__MACH__))) // UNIX std::string ip = + * "131.254.12.119"; + * + * vpSickLDMRS laser; + * laser.setIpAddress(ip); + * laser.setup(); + * + * vpLaserScan laserscan[4]; + * for ( ; ; ) { + * // Get the measured points in the four layers + * laser.measure(laserscan); + * + * // Prints all the measured points + * for (int layer=0; layer<4; layer++) { + * std::vector pointsInLayer = laserscan[layer].getScanPoints(); vpScanPoint p; + * + * for (unsigned int i=0; i < pointsInLayer.size(); i++) { + * std::cout << pointsInLayer[i] << std::endl; + * } + * } + * } + * #endif + * } + * \endcode + */ class VISP_EXPORT vpSickLDMRS : public vpLaserScanner { public: - enum MagicWord { + enum MagicWord + { MagicWordC2 = 0xAFFEC0C2 ///< The magic word that allows to identify the ///< messages that are sent by the Sick LD-MRS. }; - enum DataType { + enum DataType + { MeasuredData = 0x2202 ///< Flag to indicate that the body of a message ///< contains measured data. }; vpSickLDMRS(); + /*! Copy constructor. */ vpSickLDMRS(const vpSickLDMRS &sick) : vpLaserScanner(sick), socket_fd(-1), body(NULL), vAngle(), time_offset(0), isFirstMeasure(true), - maxlen_body(104000) + maxlen_body(104000) { *this = sick; }; - virtual ~vpSickLDMRS(); - /*! Copy constructor. */ + virtual ~vpSickLDMRS() override; + + /*! Copy operator. */ vpSickLDMRS &operator=(const vpSickLDMRS &sick) { if (this != &sick) { diff --git a/modules/tracker/blob/include/visp3/blob/vpDot.h b/modules/tracker/blob/include/visp3/blob/vpDot.h index f208d48f6c..ef8e4190f5 100644 --- a/modules/tracker/blob/include/visp3/blob/vpDot.h +++ b/modules/tracker/blob/include/visp3/blob/vpDot.h @@ -59,62 +59,63 @@ #endif /*! 
- \class vpDot - - \ingroup module_blob - - \brief This tracker is meant to track a dot (connected pixels with same - gray level) on a vpImage. - - The underground algorithm is based on a binarization of the image - and a connex component segmentation to determine the dot - characteristics (location, moments, size...). - - The following sample code shows how to grab images from a firewire camera, - track a blob and display the tracking results. - - \code -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_DC1394) - vpImage I; // Create a gray level image container - vp1394TwoGrabber g(false); // Create a grabber based on libdc1394-2.x third party lib - g.acquire(I); // Acquire an image - -#if defined(VISP_HAVE_X11) - vpDisplayX d(I, 0, 0, "Camera view"); -#endif - vpDisplay::display(I); - vpDisplay::flush(I); - - vpDot blob; - blob.initTracking(I); - blob.setGraphics(true); - - while(1) { - g.acquire(I); // Acquire an image - vpDisplay::display(I); - blob.track(I); - - vpDisplay::flush(I); - } -#endif -} - \endcode - - \sa vpDot2 -*/ + * \class vpDot + * + * \ingroup module_blob + * + * \brief This tracker is meant to track a dot (connected pixels with same + * gray level) on a vpImage. + * + * The underground algorithm is based on a binarization of the image + * and a connex component segmentation to determine the dot + * characteristics (location, moments, size...). + * + * The following sample code shows how to grab images from a firewire camera, + * track a blob and display the tracking results. 
+ * + * \code + * #include + * #include + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_DC1394) + * vpImage I; // Create a gray level image container + * vp1394TwoGrabber g(false); // Create a grabber based on libdc1394-2.x third party lib + * g.acquire(I); // Acquire an image + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX d(I, 0, 0, "Camera view"); + * #endif + * vpDisplay::display(I); + * vpDisplay::flush(I); + * + * vpDot blob; + * blob.initTracking(I); + * blob.setGraphics(true); + * + * while(1) { + * g.acquire(I); // Acquire an image + * vpDisplay::display(I); + * blob.track(I); + * + * vpDisplay::flush(I); + * } + * #endif + * } + * \endcode + * + * \sa vpDot2 + */ class VISP_EXPORT vpDot : public vpTracker { public: /*! \enum vpConnexityType - Type of connexity 4, or 8. - */ - typedef enum { + * Type of connexity 4, or 8. + */ + typedef enum + { CONNEXITY_4, /*!< For a given pixel 4 neighbors are considered (left, right, up, down) */ CONNEXITY_8 /*!< For a given pixel 8 neighbors are considered (left, @@ -190,19 +191,19 @@ class VISP_EXPORT vpDot : public vpTracker vpDot(); explicit vpDot(const vpImagePoint &ip); vpDot(const vpDot &d); - virtual ~vpDot(); + virtual ~vpDot() override; void display(const vpImage &I, vpColor color = vpColor::red, unsigned int thickness = 1) const; /*! - Gets the second order normalized centered moment \f$ n_{ij} \f$ - as a 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$ - such as \f$ n_{ij} = \mu_{ij}/m_{00} \f$ - - \return The 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$. - - \sa getCog(), getArea() - */ + * Gets the second order normalized centered moment \f$ n_{ij} \f$ + * as a 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$ + * such as \f$ n_{ij} = \mu_{ij}/m_{00} \f$ + * + * \return The 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$. 
+ * + * \sa getCog(), getArea() + */ inline vpColVector get_nij() const { vpColVector nij(3); @@ -214,19 +215,17 @@ class VISP_EXPORT vpDot : public vpTracker } /*! - Gets the area of the blob corresponding also to the zero order moment. - - \return The blob area. - */ + * Gets the area of the blob corresponding also to the zero order moment. + * + * \return The blob area. + */ inline double getArea() const { return m00; } /*! - - Return the dot bounding box. - - \sa getWidth(), getHeight() - - */ + * Return the dot bounding box. + * + * \sa getWidth(), getHeight() + */ inline vpRect getBBox() const { vpRect bbox; @@ -236,66 +235,60 @@ class VISP_EXPORT vpDot : public vpTracker return (bbox); }; /*! - Return the location of the dot center of gravity. - - \return The coordinates of the center of gravity. - */ + * Return the location of the dot center of gravity. + * + * \return The coordinates of the center of gravity. + */ inline vpImagePoint getCog() const { return cog; } /*! - Return the list of all the image points on the border of the dot. - - \warning Doesn't return the image points inside the dot anymore. To get - those points see getConnexities(). - */ + * Return the list of all the image points on the border of the dot. + * + * \warning Doesn't return the image points inside the dot anymore. To get + * those points see getConnexities(). + */ inline std::list getEdges() const { return this->ip_edges_list; }; /*! - - Return the list of all the image points inside the dot. - - \return The list of all the images points in the dot. - This list is updated after a call to track(). - - */ + * Return the list of all the image points inside the dot. + * + * \return The list of all the images points in the dot. + * This list is updated after a call to track(). + */ inline std::list getConnexities() const { return this->ip_connexities_list; }; inline double getGamma() const { return this->gamma; }; - /*! - - Return the precision of the gray level of the dot. 
It is a double - precision float witch value is in ]0,1]. 1 means full precision, whereas - values close to 0 show a very bad precision. - */ + /*! + * Return the precision of the gray level of the dot. It is a double + * precision float witch value is in ]0,1]. 1 means full precision, whereas + * values close to 0 show a very bad precision. + */ double getGrayLevelPrecision() const { return grayLevelPrecision; } double getMaxDotSize() const { return this->maxDotSizePercentage; } + /*! - Return the mean gray level value of the dot. - */ + * Return the mean gray level value of the dot. + */ double getMeanGrayLevel() const { return (this->mean_gray_level); }; /*! - \return a vpPolygon made from the edges of the dot. - */ + * \return a vpPolygon made from the edges of the dot. + */ vpPolygon getPolygon() const { return (vpPolygon(ip_edges_list)); }; /*! - - Return the width of the dot. - - \sa getHeight() - - */ + * Return the width of the dot. + * + * \sa getHeight() + */ inline unsigned int getWidth() const { return (this->u_max - this->u_min + 1); }; /*! - - Return the width of the dot. - - \sa getHeight() - - */ + * Return the width of the dot. + * + * \sa getHeight() + */ inline unsigned int getHeight() const { return (this->v_max - this->v_min + 1); }; void initTracking(const vpImage &I); @@ -311,30 +304,28 @@ class VISP_EXPORT vpDot : public vpTracker void print(std::ostream &os) { os << *this << std::endl; } /*! - Initialize the dot coordinates with \e ip. - */ + * Initialize the dot coordinates with \e ip. + */ inline void setCog(const vpImagePoint &ip) { this->cog = ip; } /*! - - Activates the dot's moments computation. - - \param activate true, if you want to compute the moments. If false, - moments are not computed. - - Computed moment are vpDot::m00, vpDot::m10, vpDot::m01, vpDot::m11, - vpDot::m20, vpDot::m02 and second order centered moments vpDot::mu11, - vpDot::mu20, vpDot::mu02 computed with respect to the blob centroid. 
- - The coordinates of the region's centroid (u, v) can be computed from the - moments by \f$u=\frac{m10}{m00}\f$ and \f$v=\frac{m01}{m00}\f$. - - */ + * Activates the dot's moments computation. + * + * \param activate true, if you want to compute the moments. If false, + * moments are not computed. + * + * Computed moment are vpDot::m00, vpDot::m10, vpDot::m01, vpDot::m11, + * vpDot::m20, vpDot::m02 and second order centered moments vpDot::mu11, + * vpDot::mu20, vpDot::mu02 computed with respect to the blob centroid. + * + * The coordinates of the region's centroid (u, v) can be computed from the + * moments by \f$u=\frac{m10}{m00}\f$ and \f$v=\frac{m01}{m00}\f$. + */ void setComputeMoments(bool activate) { compute_moment = activate; } /*! - Set the type of connexity: 4 or 8. - */ + * Set the type of connexity: 4 or 8. + */ void setConnexity(vpConnexityType type) { this->connexityType = type; }; void setMaxDotSize(double percentage); void setGrayLevelMin(const unsigned int &level_min) { this->gray_level_min = level_min; }; @@ -342,25 +333,26 @@ class VISP_EXPORT vpDot : public vpTracker void setGrayLevelPrecision(const double &grayLevelPrecision); /*! - Activates the display of all the pixels of the dot during the tracking. - The default thickness of the overlayed drawings can be modified using - setGraphicsThickness(). + * Activates the display of all the pixels of the dot during the tracking. + * The default thickness of the overlayed drawings can be modified using + * setGraphicsThickness(). + * + * \warning To effectively display the dot graphics a call to + * vpDisplay::flush() is needed. + * + * \param activate true to activate the display of dot pixels, false to turn + * off the display. + * + * \sa setGraphicsThickness() + */ - \warning To effectively display the dot graphics a call to - vpDisplay::flush() is needed. - - \param activate true to activate the display of dot pixels, false to turn - off the display. 
- - \sa setGraphicsThickness() - */ void setGraphics(bool activate) { graphics = activate; } /*! - Modify the default thickness that is set to 1 of the drawings in overlay - when setGraphics() is enabled. - - \sa setGraphics() - */ + * Modify the default thickness that is set to 1 of the drawings in overlay + * when setGraphics() is enabled. + * + * \sa setGraphics() + */ void setGraphicsThickness(unsigned int t) { this->thickness = t; }; void track(const vpImage &I); @@ -394,8 +386,8 @@ class VISP_EXPORT vpDot : public vpTracker unsigned char gray_level_out; double mean_gray_level; // Mean gray level of the dot - unsigned int gray_level_min; // left threshold for binarisation - unsigned int gray_level_max; // right threshold for binarisation + unsigned int gray_level_min; // left threshold for binarization + unsigned int gray_level_max; // right threshold for binarization double grayLevelPrecision; // precision of the gray level of the dot. // It is a double precision float witch value is in ]0,1]. // 1 means full precision, whereas values close to 0 show a very bad diff --git a/modules/tracker/blob/include/visp3/blob/vpDot2.h b/modules/tracker/blob/include/visp3/blob/vpDot2.h index 1b02a29e06..a89dceba84 100644 --- a/modules/tracker/blob/include/visp3/blob/vpDot2.h +++ b/modules/tracker/blob/include/visp3/blob/vpDot2.h @@ -52,81 +52,80 @@ #include /*! - \class vpDot2 - - \ingroup module_blob - - \brief This tracker is meant to track a blob (connex pixels with same - gray level) on a vpImage. - - The underground algorithm is based on a binarisation of the image - and then on a contour detection using the Freeman chain coding to - determine the blob characteristics (location, moments, size...). - - The binarisation is done using gray level minimum and maximum values - that define the admissible gray levels of the blob. You can specify these - levels by setGrayLevelMin() and setGrayLevelMax(). These levels are also - set automatically by setGrayLevelPrecision(). 
The algorithm allows - to track white objects on a black background and vice versa. - - When a blob is found, some tests are done to see if it is valid: - - A blob is considered by default as ellipsoid. The found blob could - be rejected if the shape is not ellipsoid. To determine if the shape - is ellipsoid the algorithm consider an inner and outside ellipse. - Sampled points on these two ellipses should have the right gray levels. - Along the inner ellipse the sampled points should have gray levels - that are in the gray level minimum and maximum bounds, while - on the outside ellipse, the gray levels should be out of the gray level - bounds. To set the percentage of the sample points which should have the - right levels use setEllipsoidBadPointsPercentage(). The distance between the - inner ellpsoid and the blob contour, as well the distance between the - blob contour and the outside ellipse is fixed by - setEllipsoidShapePrecision(). If you want to track a non ellipsoid shape, - and turn off this validation test, you have to call - setEllipsoidShapePrecision(0). - - The width, height and surface of the blob are compared to the - corresponding values of the previous blob. If they differ to much - the blob could be rejected. To set the admissible distance you can - use setSizePrecision(). - - Note that track() and searchDotsInArea() are the most important features - of this class. - - - track() estimate the current position of the dot using its previous - position, then try to compute the new parameters of the dot. If everything - went ok, tracking succeeds, otherwise we search this dot in a window - around the last position of the dot. - - - searchDotsInArea() enable to find dots similar to this dot in a window. It - is used when there was a problem performing basic tracking of the dot, but - can also be used to find a certain type of dots in the full image. 
- - The following sample code available in - tutorial-blob-tracker-live-firewire.cpp shows how to grab images from a - firewire camera, track a blob and display the tracking results. - - \include tutorial-blob-tracker-live-firewire.cpp - A line by line explanation of the previous example is provided in - \ref tutorial-tracking-blob. - - This other example available in tutorial-blob-auto-tracker.cpp shows firstly - how to detect in the first image all the blobs that match some - characteristics in terms of size, area, gray level. Secondly, it shows how - to track all the dots that are detected. - - \include tutorial-blob-auto-tracker.cpp - A line by line explanation of this last example is also provided in - \ref tutorial-tracking-blob, section \ref tracking_blob_tracking. - - \sa vpDot -*/ + * \class vpDot2 + * + * \ingroup module_blob + * + * \brief This tracker is meant to track a blob (connex pixels with same + * gray level) on a vpImage. + * + * The underground algorithm is based on a binarization of the image + * and then on a contour detection using the Freeman chain coding to + * determine the blob characteristics (location, moments, size...). + * + * The binarization is done using gray level minimum and maximum values + * that define the admissible gray levels of the blob. You can specify these + * levels by setGrayLevelMin() and setGrayLevelMax(). These levels are also + * set automatically by setGrayLevelPrecision(). The algorithm allows + * to track white objects on a black background and vice versa. + * + * When a blob is found, some tests are done to see if it is valid: + * - A blob is considered by default as ellipsoid. The found blob could + * be rejected if the shape is not ellipsoid. To determine if the shape + * is ellipsoid the algorithm consider an inner and outside ellipse. + * Sampled points on these two ellipses should have the right gray levels. 
+ * Along the inner ellipse the sampled points should have gray levels + * that are in the gray level minimum and maximum bounds, while + * on the outside ellipse, the gray levels should be out of the gray level + * bounds. To set the percentage of the sample points which should have the + * right levels use setEllipsoidBadPointsPercentage(). The distance between the + * inner ellipsoid and the blob contour, as well the distance between the + * blob contour and the outside ellipse is fixed by + * setEllipsoidShapePrecision(). If you want to track a non ellipsoid shape, + * and turn off this validation test, you have to call + * setEllipsoidShapePrecision(0). + * - The width, height and surface of the blob are compared to the + * corresponding values of the previous blob. If they differ to much + * the blob could be rejected. To set the admissible distance you can + * use setSizePrecision(). + * + * Note that track() and searchDotsInArea() are the most important features + * of this class. + * + * - track() estimate the current position of the dot using its previous + * position, then try to compute the new parameters of the dot. If everything + * went ok, tracking succeeds, otherwise we search this dot in a window + * around the last position of the dot. + * + * - searchDotsInArea() enable to find dots similar to this dot in a window. It + * is used when there was a problem performing basic tracking of the dot, but + * can also be used to find a certain type of dots in the full image. + * + * The following sample code available in + * tutorial-blob-tracker-live-firewire.cpp shows how to grab images from a + * firewire camera, track a blob and display the tracking results. + * + * \include tutorial-blob-tracker-live-firewire.cpp + * A line by line explanation of the previous example is provided in + * \ref tutorial-tracking-blob. 
+ * + * This other example available in tutorial-blob-auto-tracker.cpp shows firstly + * how to detect in the first image all the blobs that match some + * characteristics in terms of size, area, gray level. Secondly, it shows how + * to track all the dots that are detected. + * + * \include tutorial-blob-auto-tracker.cpp + * A line by line explanation of this last example is also provided in + * \ref tutorial-tracking-blob, section \ref tracking_blob_tracking. + * + * \sa vpDot + */ class VISP_EXPORT vpDot2 : public vpTracker { public: vpDot2(); explicit vpDot2(const vpImagePoint &ip); vpDot2(const vpDot2 &twinDot); - virtual ~vpDot2(); static vpMatrix defineDots(vpDot2 dot[], const unsigned int &n, const std::string &dotFile, vpImage &I, vpColor col = vpColor::blue, bool trackDot = true); @@ -134,14 +133,14 @@ class VISP_EXPORT vpDot2 : public vpTracker void display(const vpImage &I, vpColor color = vpColor::red, unsigned int thickness = 1) const; /*! - Gets the second order normalized centered moment \f$ n_{ij} \f$ - as a 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$ - such as \f$ n_{ij} = \mu_{ij}/m_{00} \f$ - - \return The 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$. - - \sa getCog(), getArea() - */ + * Gets the second order normalized centered moment \f$ n_{ij} \f$ + * as a 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$ + * such as \f$ n_{ij} = \mu_{ij}/m_{00} \f$ + * + * \return The 3-dim vector containing \f$ n_{20}, n_{11}, n_{02} \f$. + * + * \sa getCog(), getArea() + */ inline vpColVector get_nij() const { vpColVector nij(3); @@ -153,13 +152,12 @@ class VISP_EXPORT vpDot2 : public vpTracker } double getArea() const; - /*! - - Return the dot bounding box. - \sa getWidth(), getHeight() - - */ + /*! + * Return the dot bounding box. + * + * \sa getWidth(), getHeight() + */ inline vpRect getBBox() const { vpRect bbox; @@ -169,40 +167,39 @@ class VISP_EXPORT vpDot2 : public vpTracker return (bbox); }; - /*! 
- Return the location of the dot center of gravity. - \return The coordinates of the center of gravity. - */ + /*! + * Return the location of the dot center of gravity. + * + * \return The coordinates of the center of gravity. + */ inline vpImagePoint getCog() const { return cog; } double getDistance(const vpDot2 &distantDot) const; /*! - - Return the list of all the image points on the dot - border. - - \param edges_list : The list of all the images points on the dot - border. This list is update after a call to track(). - - */ + * Return the list of all the image points on the dot + * border. + * + * \param edges_list : The list of all the images points on the dot + * border. This list is update after a call to track(). + */ void getEdges(std::list &edges_list) const { edges_list = this->ip_edges_list; }; - /*! - - Return the list of all the image points on the dot - border. - - \return The list of all the images points on the dot - border. This list is update after a call to track(). - */ - std::list getEdges() const { return (this->ip_edges_list); }; /*! - Get the percentage of sampled points that are considered non conform - in terms of the gray level on the inner and the ouside ellipses. + * Return the list of all the image points on the dot + * border. + * + * \return The list of all the images points on the dot + * border. This list is update after a call to track(). + */ + std::list getEdges() const { return (this->ip_edges_list); }; - \sa setEllipsoidBadPointsPercentage() - */ + /*! + * Get the percentage of sampled points that are considered non conform + * in terms of the gray level on the inner and the outside ellipses. + * + * \sa setEllipsoidBadPointsPercentage() + */ double getEllipsoidBadPointsPercentage() const { return allowedBadPointsPercentage_; } double getEllipsoidShapePrecision() const; @@ -210,28 +207,30 @@ class VISP_EXPORT vpDot2 : public vpTracker inline double getGamma() const { return this->gamma; }; /*! 
- Return the color level of pixels inside the dot. - - \sa getGrayLevelMax() - */ + * Return the color level of pixels inside the dot. + * + * \sa getGrayLevelMax() + */ inline unsigned int getGrayLevelMin() const { return gray_level_min; }; /*! - Return the color level of pixels inside the dot. - - \sa getGrayLevelMin() - */ + * Return the color level of pixels inside the dot. + * + * \sa getGrayLevelMin() + */ inline unsigned int getGrayLevelMax() const { return gray_level_max; }; double getGrayLevelPrecision() const; double getHeight() const; double getMaxSizeSearchDistancePrecision() const; + /*! - \return The mean gray level value of the dot. - */ + * \return The mean gray level value of the dot. + */ double getMeanGrayLevel() const { return (this->mean_gray_level); }; + /*! - \return a vpPolygon made from the edges of the dot. - */ + * \return a vpPolygon made from the edges of the dot. + */ vpPolygon getPolygon() const { return (vpPolygon(ip_edges_list)); }; double getSizePrecision() const; double getWidth() const; @@ -252,38 +251,37 @@ class VISP_EXPORT vpDot2 : public vpTracker void setArea(const double &area); /*! - Initialize the dot coordinates with \e ip. - */ + * Initialize the dot coordinates with \e ip. + */ inline void setCog(const vpImagePoint &ip) { this->cog = ip; } - /*! - - Activates the dot's moments computation. - - \param activate true, if you want to compute the moments. If false, - moments are not computed. - - Computed moment are vpDot::m00, vpDot::m10, vpDot::m01, vpDot::m11, - vpDot::m20, vpDot::m02 and second order centereed moments vpDot::mu11, - vpDot::mu20, vpDot::mu02 computed with respect to the blob centroid. - - The coordinates of the region's centroid (u, v) can be computed from the - moments by \f$u=\frac{m10}{m00}\f$ and \f$v=\frac{m01}{m00}\f$. - */ + /*! + * Activates the dot's moments computation. + * + * \param activate true, if you want to compute the moments. If false, + * moments are not computed. 
+ * + * Computed moment are vpDot::m00, vpDot::m10, vpDot::m01, vpDot::m11, + * vpDot::m20, vpDot::m02 and second order centered moments vpDot::mu11, + * vpDot::mu20, vpDot::mu02 computed with respect to the blob centroid. + * + * The coordinates of the region's centroid (u, v) can be computed from the + * moments by \f$u=\frac{m10}{m00}\f$ and \f$v=\frac{m01}{m00}\f$. + */ void setComputeMoments(bool activate) { compute_moment = activate; } /*! - Set the percentage of sampled points that are considered non conform - in terms of the gray level on the inner and the ouside ellipses. - Points located on the inner ellipse should have the same gray level - than the blob, while points located on the outside ellipse should - have a different gray level. - - \param percentage : Percentage of points sampled with bad gray level - on the inner and outside ellipses that are admissible. 0 means - that all the points should have a right level, while a value of 1 - means that all the points can have a bad gray level. - */ + * Set the percentage of sampled points that are considered non conform + * in terms of the gray level on the inner and the outside ellipses. + * Points located on the inner ellipse should have the same gray level + * than the blob, while points located on the outside ellipse should + * have a different gray level. + * + * \param percentage : Percentage of points sampled with bad gray level + * on the inner and outside ellipses that are admissible. 0 means + * that all the points should have a right level, while a value of 1 + * means that all the points can have a bad gray level. + */ void setEllipsoidBadPointsPercentage(const double &percentage = 0.0) { if (percentage < 0.) @@ -295,39 +293,40 @@ class VISP_EXPORT vpDot2 : public vpTracker } void setEllipsoidShapePrecision(const double &ellipsoidShapePrecision); - /*! - Activates the display of the border of the dot during the tracking. 
- The default thickness of the overlayed drawings can be modified using - setGraphicsThickness(). - - \warning To effectively display the dot graphics a call to - vpDisplay::flush() is needed. - - \param activate If true, the border of the dot will be painted. false to - turn off border painting. - \sa setGraphicsThickness() - */ - void setGraphics(bool activate) { graphics = activate; } /*! - Modify the default thickness that is set to 1 of the drawings in overlay - when setGraphics() is enabled. + * Activates the display of the border of the dot during the tracking. + * The default thickness of the overlayed drawings can be modified using + * setGraphicsThickness(). + * + * \warning To effectively display the dot graphics a call to + * vpDisplay::flush() is needed. + * + * \param activate If true, the border of the dot will be painted. false to + * turn off border painting. + * + * \sa setGraphicsThickness() + */ + void setGraphics(bool activate) { graphics = activate; } - \sa setGraphics() - */ - void setGraphicsThickness(unsigned int t) { this->thickness = t; }; /*! + * Modify the default thickness that is set to 1 of the drawings in overlay + * when setGraphics() is enabled. + * + * \sa setGraphics() + */ + void setGraphicsThickness(unsigned int t) { this->thickness = t; }; - Set the color level of the dot to search a dot in a region of interest. This - level will be used to know if a pixel in the image belongs to the dot or - not. Only pixels with higher level can belong to the dot. If the level is - lower than the minimum level for a dot, set the level to MIN_IN_LEVEL. - - \param min : Color level of a dot to search in a region of interest. - - \sa setGrayLevelMax(), setGrayLevelPrecision() - - */ + /*! + * Set the color level of the dot to search a dot in a region of interest. This + * level will be used to know if a pixel in the image belongs to the dot or + * not. Only pixels with higher level can belong to the dot. 
If the level is + * lower than the minimum level for a dot, set the level to MIN_IN_LEVEL. + * + * \param min : Color level of a dot to search in a region of interest. + * + * \sa setGrayLevelMax(), setGrayLevelPrecision() + */ inline void setGrayLevelMin(const unsigned int &min) { if (min > 255) @@ -337,14 +336,13 @@ class VISP_EXPORT vpDot2 : public vpTracker }; /*! - - Set the color level of pixels surrounding the dot. This is meant to be used - to search a dot in a region of interest. - - \param max : Intensity level of a dot to search in a region of interest. - - \sa setGrayLevelMin(), setGrayLevelPrecision() - */ + * Set the color level of pixels surrounding the dot. This is meant to be used + * to search a dot in a region of interest. + * + * \param max : Intensity level of a dot to search in a region of interest. + * + * \sa setGrayLevelMin(), setGrayLevelPrecision() + */ inline void setGrayLevelMax(const unsigned int &max) { if (max > 255) @@ -352,6 +350,7 @@ class VISP_EXPORT vpDot2 : public vpTracker else this->gray_level_max = max; }; + void setGrayLevelPrecision(const double &grayLevelPrecision); void setHeight(const double &height); void setMaxSizeSearchDistancePrecision(const double &maxSizeSearchDistancePrecision); @@ -451,22 +450,19 @@ class VISP_EXPORT vpDot2 : public vpTracker void computeMeanGrayLevel(const vpImage &I); /*! - - Get the starting point on a dot border. The dot border is - computed from this point. - - \sa getFirstBorder_v() - - */ + Get the starting point on a dot border. The dot border is + computed from this point. + * + \sa getFirstBorder_v() + */ unsigned int getFirstBorder_u() const { return this->firstBorder_u; } - /*! - Get the starting point on a dot border. The dot border is - computed from this point. - - \sa getFirstBorder_u() - - */ + /*! + Get the starting point on a dot border. The dot border is + computed from this point. 
+ * + \sa getFirstBorder_u() + */ unsigned int getFirstBorder_v() const { return this->firstBorder_v; } bool computeFreemanChainElement(const vpImage &I, const unsigned int &u, const unsigned int &v, @@ -522,7 +518,7 @@ class VISP_EXPORT vpDot2 : public vpTracker // Bounding box int bbox_u_min, bbox_u_max, bbox_v_min, bbox_v_max; - // The first point coodinate on the dot border + // The first point coordinate on the dot border unsigned int firstBorder_u; unsigned int firstBorder_v; diff --git a/modules/tracker/blob/src/dots/vpDot2.cpp b/modules/tracker/blob/src/dots/vpDot2.cpp index 4c2d5607be..f3453957db 100644 --- a/modules/tracker/blob/src/dots/vpDot2.cpp +++ b/modules/tracker/blob/src/dots/vpDot2.cpp @@ -102,12 +102,11 @@ void vpDot2::init() */ vpDot2::vpDot2() : m00(0.), m10(0.), m01(0.), m11(0.), m20(0.), m02(0.), mu11(0.), mu20(0.), mu02(0.), cog(), width(0), height(0), - surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), grayLevelPrecision(0.8), gamma(1.5), - sizePrecision(0.65), ellipsoidShapePrecision(0.65), maxSizeSearchDistancePrecision(0.65), - allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), compute_moment(false), graphics(false), - thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), firstBorder_u(0), firstBorder_v() -{ -} + surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), grayLevelPrecision(0.8), gamma(1.5), + sizePrecision(0.65), ellipsoidShapePrecision(0.65), maxSizeSearchDistancePrecision(0.65), + allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), compute_moment(false), graphics(false), + thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), firstBorder_u(0), firstBorder_v() +{ } /*! 
@@ -119,23 +118,22 @@ vpDot2::vpDot2() */ vpDot2::vpDot2(const vpImagePoint &ip) : m00(0.), m10(0.), m01(0.), m11(0.), m20(0.), m02(0.), mu11(0.), mu20(0.), mu02(0.), cog(ip), width(0), height(0), - surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), grayLevelPrecision(0.8), gamma(1.5), - sizePrecision(0.65), ellipsoidShapePrecision(0.65), maxSizeSearchDistancePrecision(0.65), - allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), compute_moment(false), graphics(false), - thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), firstBorder_u(0), firstBorder_v() -{ -} + surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), grayLevelPrecision(0.8), gamma(1.5), + sizePrecision(0.65), ellipsoidShapePrecision(0.65), maxSizeSearchDistancePrecision(0.65), + allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), compute_moment(false), graphics(false), + thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), firstBorder_u(0), firstBorder_v() +{ } /*! Copy constructor. 
*/ vpDot2::vpDot2(const vpDot2 &twinDot) : vpTracker(twinDot), m00(0.), m10(0.), m01(0.), m11(0.), m20(0.), m02(0.), mu11(0.), mu20(0.), mu02(0.), cog(), - width(0), height(0), surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), - grayLevelPrecision(0.8), gamma(1.5), sizePrecision(0.65), ellipsoidShapePrecision(0.65), - maxSizeSearchDistancePrecision(0.65), allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), - compute_moment(false), graphics(false), thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), - firstBorder_u(0), firstBorder_v() + width(0), height(0), surface(0), gray_level_min(128), gray_level_max(255), mean_gray_level(0), + grayLevelPrecision(0.8), gamma(1.5), sizePrecision(0.65), ellipsoidShapePrecision(0.65), + maxSizeSearchDistancePrecision(0.65), allowedBadPointsPercentage_(0.), area(), direction_list(), ip_edges_list(), + compute_moment(false), graphics(false), thickness(1), bbox_u_min(0), bbox_u_max(0), bbox_v_min(0), bbox_v_max(0), + firstBorder_u(0), firstBorder_v() { *this = twinDot; } @@ -191,11 +189,6 @@ vpDot2 &vpDot2::operator=(const vpDot2 &twinDot) return (*this); } -/*! - Destructor... do nothing for the moment. 
-*/ -vpDot2::~vpDot2() {} - /****************************************************************************** * * PUBLIC METHODS @@ -261,7 +254,8 @@ void vpDot2::initTracking(const vpImage &I, unsigned int size) if (Ip - (1 - grayLevelPrecision) < 0) { gray_level_min = 0; - } else { + } + else { gray_level_min = (unsigned int)(255 * pow(Ip - (1 - grayLevelPrecision), gamma)); if (gray_level_min > 255) gray_level_min = 255; @@ -275,8 +269,9 @@ void vpDot2::initTracking(const vpImage &I, unsigned int size) try { track(I); - } catch (const vpException &e) { - // vpERROR_TRACE("Error caught") ; + } + catch (const vpException &e) { + // vpERROR_TRACE("Error caught") ; throw(e); } } @@ -319,7 +314,8 @@ void vpDot2::initTracking(const vpImage &I, const vpImagePoint &i if (Ip - (1 - grayLevelPrecision) < 0) { gray_level_min = 0; - } else { + } + else { gray_level_min = (unsigned int)(255 * pow(Ip - (1 - grayLevelPrecision), gamma)); if (gray_level_min > 255) gray_level_min = 255; @@ -333,8 +329,9 @@ void vpDot2::initTracking(const vpImage &I, const vpImagePoint &i try { track(I); - } catch (const vpException &e) { - // vpERROR_TRACE("Error caught") ; + } + catch (const vpException &e) { + // vpERROR_TRACE("Error caught") ; throw(e); } } @@ -391,8 +388,9 @@ void vpDot2::initTracking(const vpImage &I, const vpImagePoint &i try { track(I); - } catch (const vpException &e) { - // vpERROR_TRACE("Error caught") ; + } + catch (const vpException &e) { + // vpERROR_TRACE("Error caught") ; throw(e); } } @@ -486,10 +484,12 @@ void vpDot2::track(const vpImage &I, bool canMakeTheWindowGrow) std::fabs(getHeight()) <= std::numeric_limits::epsilon()) { searchWindowWidth = 80.; searchWindowHeight = 80.; - } else if (canMakeTheWindowGrow) { + } + else if (canMakeTheWindowGrow) { searchWindowWidth = getWidth() * 5; searchWindowHeight = getHeight() * 5; - } else { + } + else { searchWindowWidth = getWidth(); searchWindowHeight = getHeight(); } @@ -547,7 +547,8 @@ void vpDot2::track(const 
vpImage &I, bool canMakeTheWindowGrow) // getMeanGrayLevel(I); if (Ip - (1 - grayLevelPrecision) < 0) { gray_level_min = 0; - } else { + } + else { gray_level_min = (unsigned int)(255 * pow(Ip - (1 - grayLevelPrecision), gamma)); if (gray_level_min > 255) gray_level_min = 255; @@ -719,9 +720,11 @@ void vpDot2::setGrayLevelPrecision(const double &precision) double epsilon = 0.05; if (grayLevelPrecision < epsilon) { this->grayLevelPrecision = epsilon; - } else if (grayLevelPrecision > 1) { + } + else if (grayLevelPrecision > 1) { this->grayLevelPrecision = 1.0; - } else { + } + else { this->grayLevelPrecision = precision; } } @@ -746,9 +749,11 @@ void vpDot2::setSizePrecision(const double &precision) { if (sizePrecision < 0) { this->sizePrecision = 0; - } else if (sizePrecision > 1) { + } + else if (sizePrecision > 1) { this->sizePrecision = 1.0; - } else { + } + else { this->sizePrecision = precision; } } @@ -790,9 +795,11 @@ void vpDot2::setEllipsoidShapePrecision(const double &precision) if (ellipsoidShapePrecision < 0) { this->ellipsoidShapePrecision = 0; - } else if (ellipsoidShapePrecision > 1) { + } + else if (ellipsoidShapePrecision > 1) { this->ellipsoidShapePrecision = 1.0; - } else { + } + else { this->ellipsoidShapePrecision = precision; } } @@ -817,9 +824,11 @@ void vpDot2::setMaxSizeSearchDistancePrecision(const double &precision) double epsilon = 0.05; if (maxSizeSearchDistancePrecision < epsilon) { this->maxSizeSearchDistancePrecision = epsilon; - } else if (maxSizeSearchDistancePrecision > 1) { + } + else if (maxSizeSearchDistancePrecision > 1) { this->maxSizeSearchDistancePrecision = 1.0; - } else { + } + else { this->maxSizeSearchDistancePrecision = precision; } } @@ -1062,10 +1071,10 @@ void vpDot2::searchDotsInArea(const vpImage &I, int area_u, int a // if( border_u == cogBadDot.get_u() && v == cogBadDot.get_v()) { if ((std::fabs(border_u - cogBadDot.get_u()) <= vpMath::maximum(std::fabs((double)border_u), std::fabs(cogBadDot.get_u())) * - 
std::numeric_limits::epsilon()) && + std::numeric_limits::epsilon()) && (std::fabs(v - cogBadDot.get_v()) <= vpMath::maximum(std::fabs((double)v), std::fabs(cogBadDot.get_v())) * - std::numeric_limits::epsilon())) { + std::numeric_limits::epsilon())) { good_germ = false; } ++it_edges; @@ -1179,8 +1188,9 @@ void vpDot2::searchDotsInArea(const vpImage &I, int area_u, int a if (itnice == niceDots.end() && stopLoop == false) { niceDots.push_back(*dotToTest); } - } else { - // Store bad dots + } + else { + // Store bad dots badDotsVector.push_front(*dotToTest); } } @@ -1231,10 +1241,10 @@ bool vpDot2::isValid(const vpImage &I, const vpDot2 &wantedDot) #ifdef DEBUG std::cout << "test size precision......................\n"; std::cout << "wanted dot: " - << "w=" << wantedDot.getWidth() << " h=" << wantedDot.getHeight() << " s=" << wantedDot.getArea() - << " precision=" << size_precision << " epsilon=" << epsilon << std::endl; + << "w=" << wantedDot.getWidth() << " h=" << wantedDot.getHeight() << " s=" << wantedDot.getArea() + << " precision=" << size_precision << " epsilon=" << epsilon << std::endl; std::cout << "dot found: " - << "w=" << getWidth() << " h=" << getHeight() << " s=" << getArea() << std::endl; + << "w=" << getWidth() << " h=" << getHeight() << " s=" << getArea() << std::endl; #endif if ((wantedDot.getWidth() * size_precision - epsilon < getWidth()) == false) { @@ -1440,7 +1450,8 @@ bool vpDot2::hasGoodLevel(const vpImage &I, const unsigned int &u if (I[v][u] >= gray_level_min && I[v][u] <= gray_level_max) { return true; - } else { + } + else { return false; } } @@ -1465,7 +1476,8 @@ bool vpDot2::hasReverseLevel(const vpImage &I, const unsigned int if (I[v][u] < gray_level_min || I[v][u] > gray_level_max) { return true; - } else { + } + else { return false; } } @@ -1702,7 +1714,8 @@ bool vpDot2::computeParameters(const vpImage &I, const double &_u std::fabs(m00 - 1.) <= vpMath::maximum(std::fabs(m00), 1.) 
* std::numeric_limits::epsilon()) { vpDEBUG_TRACE(3, "The center of gravity of the dot wasn't properly detected"); return false; - } else // compute the center + } + else // compute the center { // this magic formula gives the coordinates of the center of gravity double tmpCenter_u = m10 / m00; @@ -1808,58 +1821,66 @@ bool vpDot2::computeFreemanChainElement(const vpImage &I, const u updateFreemanPosition(_u, _v, (element + 2) % 8); if (hasGoodLevel(I, _u, _v)) { element = (element + 2) % 8; // turn right - } else { + } + else { unsigned int _u1 = u; unsigned int _v1 = v; updateFreemanPosition(_u1, _v1, (element + 1) % 8); if (hasGoodLevel(I, _u1, _v1)) { element = (element + 1) % 8; // turn diag right - } else { + } + else { unsigned int _u2 = u; unsigned int _v2 = v; updateFreemanPosition(_u2, _v2, element); // same direction if (hasGoodLevel(I, _u2, _v2)) { // element = element; // keep same dir - } else { + } + else { unsigned int _u3 = u; unsigned int _v3 = v; updateFreemanPosition(_u3, _v3, (element + 7) % 8); // diag left if (hasGoodLevel(I, _u3, _v3)) { element = (element + 7) % 8; // turn diag left - } else { + } + else { unsigned int _u4 = u; unsigned int _v4 = v; updateFreemanPosition(_u4, _v4, (element + 6) % 8); // left if (hasGoodLevel(I, _u4, _v4)) { element = (element + 6) % 8; // turn left - } else { + } + else { unsigned int _u5 = u; unsigned int _v5 = v; updateFreemanPosition(_u5, _v5, (element + 5) % 8); // left if (hasGoodLevel(I, _u5, _v5)) { element = (element + 5) % 8; // turn diag down - } else { + } + else { unsigned int _u6 = u; unsigned int _v6 = v; updateFreemanPosition(_u6, _v6, (element + 4) % 8); // left if (hasGoodLevel(I, _u6, _v6)) { element = (element + 4) % 8; // turn down - } else { + } + else { unsigned int _u7 = u; unsigned int _v7 = v; updateFreemanPosition(_u7, _v7, (element + 3) % 8); // diag if (hasGoodLevel(I, _u7, _v7)) { element = (element + 3) % 8; // turn diag right down - } else { - // No neighbor with a good level - 
// + } + else { + // No neighbor with a good level + // return false; } } @@ -2208,14 +2229,16 @@ void vpDot2::computeMeanGrayLevel(const vpImage &I) if (nb_pixels < 10) { // could be good to choose the min nb points from area of dot // add diagonals points to have enough point int imin, imax; - if ((cog_u - bbox_u_min) > (cog_v - bbox_v_min)) { + if ((cog_u - bbox_u_min) >(cog_v - bbox_v_min)) { imin = cog_v - bbox_v_min; - } else { + } + else { imin = cog_u - bbox_u_min; } if ((bbox_u_max - cog_u) > (bbox_v_max - cog_v)) { imax = bbox_v_max - cog_v; - } else { + } + else { imax = bbox_u_max - cog_u; } for (int i = -imin; i <= imax; i++) { @@ -2228,12 +2251,14 @@ void vpDot2::computeMeanGrayLevel(const vpImage &I) if ((cog_u - bbox_u_min) > (bbox_v_max - cog_v)) { imin = bbox_v_max - cog_v; - } else { + } + else { imin = cog_u - bbox_u_min; } if ((bbox_u_max - cog_u) > (cog_v - bbox_v_min)) { imax = cog_v - bbox_v_min; - } else { + } + else { imax = bbox_u_max - cog_u; } @@ -2249,7 +2274,8 @@ void vpDot2::computeMeanGrayLevel(const vpImage &I) if (nb_pixels == 0) { // should never happen throw(vpTrackingException(vpTrackingException::notEnoughPointError, "No point was found")); - } else { + } + else { mean_gray_level = sum_value / nb_pixels; } } @@ -2303,7 +2329,8 @@ vpMatrix vpDot2::defineDots(vpDot2 dot[], const unsigned int &n, const std::stri vpDisplay::displayCross(I, cog, 10, col); } } - } catch (...) { + } + catch (...) 
{ std::cout << "Cannot track dots from file" << std::endl; fromFile = false; } @@ -2331,7 +2358,8 @@ vpMatrix vpDot2::defineDots(vpDot2 dot[], const unsigned int &n, const std::stri dot[i].setGraphics(true); dot[i].initTracking(I); cog = dot[i].getCog(); - } else { + } + else { vpDisplay::getClick(I, cog); dot[i].setCog(cog); } diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h index b804b965f9..b42f849550 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h @@ -49,26 +49,26 @@ class VISP_EXPORT vpMbDepthDenseTracker : public virtual vpMbTracker { public: vpMbDepthDenseTracker(); - virtual ~vpMbDepthDenseTracker(); + virtual ~vpMbDepthDenseTracker() override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; - virtual inline vpColVector getError() const { return m_error_depthDense; } + virtual inline vpColVector getError() const override { return m_error_depthDense; } virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual inline vpColVector getRobustWeights() const { return m_w_depthDense; } + virtual inline vpColVector getRobustWeights() const override { return m_w_depthDense; } - virtual void init(const vpImage &I); + 
virtual void init(const vpImage &I) override; - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false); @@ -77,9 +77,9 @@ class VISP_EXPORT vpMbDepthDenseTracker : public virtual vpMbTracker const vpHomogeneousMatrix &cMo, bool verbose = false); #endif - virtual void resetTracker(); + virtual void resetTracker() override; - virtual void setCameraParameters(const vpCameraParameters &camera); + virtual void setCameraParameters(const vpCameraParameters &camera) override; virtual void setDepthDenseFilteringMaxDistance(double maxDistance); virtual void setDepthDenseFilteringMethod(int method); @@ -97,22 +97,22 @@ class VISP_EXPORT vpMbDepthDenseTracker : public virtual vpMbTracker m_depthDenseSamplingStepY = stepY; } - virtual void setOgreVisibilityTest(const bool &v); + virtual void setOgreVisibilityTest(const bool &v) override; - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; #ifdef VISP_HAVE_PCL virtual void setPose(const pcl::PointCloud::ConstPtr &point_cloud, const vpHomogeneousMatrix &cdMo); #endif - virtual void setScanLineVisibilityTest(const bool &v); + virtual void setScanLineVisibilityTest(const bool &v) override; void setUseDepthDenseTracking(const std::string &name, const bool &useDepthDenseTracking); - virtual void testTracking(); + virtual void testTracking() override; - virtual void track(const vpImage &); - virtual void track(const vpImage &); + virtual void track(const vpImage &) override; + virtual void track(const vpImage &) override; #ifdef 
VISP_HAVE_PCL virtual void track(const pcl::PointCloud::ConstPtr &point_cloud); #endif @@ -151,20 +151,20 @@ class VISP_EXPORT vpMbDepthDenseTracker : public virtual vpMbTracker void computeVisibility(unsigned int width, unsigned int height); void computeVVS(); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; virtual void computeVVSWeights(); using vpMbTracker::computeVVSWeights; virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; - virtual void initFaceFromCorners(vpMbtPolygon &polygon); + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; - virtual void initFaceFromLines(vpMbtPolygon &polygon); + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; #ifdef VISP_HAVE_PCL void segmentPointCloud(const pcl::PointCloud::ConstPtr &point_cloud); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h index 290d51bf9d..72678f823e 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h @@ -49,31 +49,31 @@ class VISP_EXPORT vpMbDepthNormalTracker : public virtual vpMbTracker { public: vpMbDepthNormalTracker(); - virtual ~vpMbDepthNormalTracker(); + virtual ~vpMbDepthNormalTracker() override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool 
displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual inline vpMbtFaceDepthNormal::vpFeatureEstimationType getDepthFeatureEstimationMethod() const { return m_depthNormalFeatureEstimationMethod; } - virtual inline vpColVector getError() const { return m_error_depthNormal; } + virtual inline vpColVector getError() const override { return m_error_depthNormal; } virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual inline vpColVector getRobustWeights() const { return m_w_depthNormal; } + virtual inline vpColVector getRobustWeights() const override { return m_w_depthNormal; } - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false); @@ -82,9 +82,9 @@ class VISP_EXPORT vpMbDepthNormalTracker : public virtual vpMbTracker const vpHomogeneousMatrix &cMo, bool verbose = false); #endif - virtual void resetTracker(); + virtual void resetTracker() override; - virtual void setCameraParameters(const vpCameraParameters &camera); + virtual void setCameraParameters(const vpCameraParameters &camera) override; virtual void setDepthNormalFaceCentroidMethod(const vpMbtFaceDepthNormal::vpFaceCentroidType &method); @@ -100,22 +100,22 @@ class VISP_EXPORT vpMbDepthNormalTracker : public virtual vpMbTracker // virtual void 
setDepthNormalUseRobust(bool use); - virtual void setOgreVisibilityTest(const bool &v); + virtual void setOgreVisibilityTest(const bool &v) override; - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; #if defined(VISP_HAVE_PCL) virtual void setPose(const pcl::PointCloud::ConstPtr &point_cloud, const vpHomogeneousMatrix &cdMo); #endif - virtual void setScanLineVisibilityTest(const bool &v); + virtual void setScanLineVisibilityTest(const bool &v) override; void setUseDepthNormalTracking(const std::string &name, const bool &useDepthNormalTracking); - virtual void testTracking(); + virtual void testTracking() override; - virtual void track(const vpImage &); - virtual void track(const vpImage &I_color); + virtual void track(const vpImage &) override; + virtual void track(const vpImage &I_color) override; #if defined(VISP_HAVE_PCL) virtual void track(const pcl::PointCloud::ConstPtr &point_cloud); #endif @@ -166,20 +166,20 @@ class VISP_EXPORT vpMbDepthNormalTracker : public virtual vpMbTracker void computeVisibility(unsigned int width, unsigned int height); void computeVVS(); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; virtual std::vector > getFeaturesForDisplayDepthNormal(); virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; - virtual void 
initFaceFromCorners(vpMbtPolygon &polygon); + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; - virtual void initFaceFromLines(vpMbtPolygon &polygon); + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; #ifdef VISP_HAVE_PCL void segmentPointCloud(const pcl::PointCloud::ConstPtr &point_cloud); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h index 68ef176a30..73152356c9 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h @@ -232,18 +232,18 @@ class VISP_EXPORT vpMbEdgeKltTracker : virtual ~vpMbEdgeKltTracker(); virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; - virtual inline vpColVector getError() const { return m_error_hybrid; } + virtual inline vpColVector getError() const override { return m_error_hybrid; } virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual inline vpColVector getRobustWeights() const { return m_w_hybrid; } + virtual inline vpColVector getRobustWeights() const override { return m_w_hybrid; } /*! Get the near distance for clipping. 
@@ -252,13 +252,13 @@ class VISP_EXPORT vpMbEdgeKltTracker : */ virtual inline double getNearClippingDistance() const { return vpMbKltTracker::getNearClippingDistance(); } - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, - bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); - void resetTracker(); + bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; + void resetTracker() override; - virtual void setCameraParameters(const vpCameraParameters &cam); + virtual void setCameraParameters(const vpCameraParameters &cam) override; /*! Specify which clipping to use. @@ -267,21 +267,21 @@ class VISP_EXPORT vpMbEdgeKltTracker : \param flags : New clipping flags. */ - virtual void setClipping(const unsigned int &flags) { vpMbEdgeTracker::setClipping(flags); } + virtual void setClipping(const unsigned int &flags) override { vpMbEdgeTracker::setClipping(flags); } /*! Set the far distance for clipping. \param dist : Far clipping value. */ - virtual void setFarClippingDistance(const double &dist) { vpMbEdgeTracker::setFarClippingDistance(dist); } + virtual void setFarClippingDistance(const double &dist) override { vpMbEdgeTracker::setFarClippingDistance(dist); } /*! Set the near distance for clipping. \param dist : Near clipping value. */ - virtual void setNearClippingDistance(const double &dist) { vpMbEdgeTracker::setNearClippingDistance(dist); } + virtual void setNearClippingDistance(const double &dist) override { vpMbEdgeTracker::setNearClippingDistance(dist); } /*! 
Use Ogre3D for visibility tests @@ -291,7 +291,7 @@ class VISP_EXPORT vpMbEdgeKltTracker : \param v : True to use it, False otherwise */ - virtual void setOgreVisibilityTest(const bool &v) + virtual void setOgreVisibilityTest(const bool &v) override { vpMbTracker::setOgreVisibilityTest(v); #ifdef VISP_HAVE_OGRE @@ -304,40 +304,43 @@ class VISP_EXPORT vpMbEdgeKltTracker : \param v : True to use it, False otherwise */ - virtual void setScanLineVisibilityTest(const bool &v) + virtual void setScanLineVisibilityTest(const bool &v) override { vpMbEdgeTracker::setScanLineVisibilityTest(v); vpMbKltTracker::setScanLineVisibilityTest(v); } - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; /*! - Set if the projection error criteria has to be computed. - - \param flag : True if the projection error criteria has to be computed, - false otherwise - */ - virtual void setProjectionErrorComputation(const bool &flag) { vpMbEdgeTracker::setProjectionErrorComputation(flag); } + * Set if the projection error criteria has to be computed. 
+ * + * \param flag : True if the projection error criteria has to be computed, + * false otherwise + */ + virtual void setProjectionErrorComputation(const bool &flag) override + { + vpMbEdgeTracker::setProjectionErrorComputation(flag); + } - virtual void testTracking() {} - virtual void track(const vpImage &I); - virtual void track(const vpImage &I_color); + virtual void testTracking() override { } + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I_color) override; protected: virtual void computeVVS(const vpImage &I, const unsigned int &nbInfos, unsigned int &nbrow, unsigned int lvl = 0, double *edge_residual = NULL, double *klt_residual = NULL); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; using vpMbTracker::computeCovarianceMatrixVVS; using vpMbTracker::computeVVSPoseEstimation; - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; virtual void initCircle(const vpPoint &, const vpPoint &, const vpPoint &, double r, int idFace = 0, - const std::string &name = ""); - virtual void initCylinder(const vpPoint &, const vpPoint &, double r, int idFace, const std::string &name = ""); - virtual void initFaceFromCorners(vpMbtPolygon &polygon); - virtual void initFaceFromLines(vpMbtPolygon &polygon); + const std::string &name = "") override; + virtual void initCylinder(const vpPoint &, const vpPoint &, double r, int idFace, const std::string &name = "") override; + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; unsigned int initMbtTracking(unsigned int level = 0); bool postTracking(const vpImage &I, vpColVector &w_mbt, vpColVector &w_klt, unsigned int lvl = 0); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeTracker.h 
b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeTracker.h index 8fc8ce6c70..4f5073230f 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,13 +29,12 @@ * * Description: * Make the complete tracking of an object by using its CAD model - * -*****************************************************************************/ + */ /*! - \file vpMbEdgeTracker.h - \brief Make the complete tracking of an object by using its CAD model. -*/ + * \file vpMbEdgeTracker.h + * \brief Make the complete tracking of an object by using its CAD model. + */ #ifndef vpMbEdgeTracker_HH #define vpMbEdgeTracker_HH @@ -76,168 +74,166 @@ #endif /*! - \class vpMbEdgeTracker - \ingroup group_mbt_trackers - \brief Make the complete tracking of an object by using its CAD model. - \warning This class is deprecated for user usage. You should rather use the high level - vpMbGenericTracker class. - - This class allows to track an object or a scene given its 3D model. A - video can be found on YouTube \e https://www.youtube.com/watch?v=UK10KMMJFCI - The \ref tutorial-tracking-mb-deprecated is also a good starting point to use this class. - - The tracker requires the knowledge of the 3D model that could be provided in - a vrml or in a cao file. The cao format is described in loadCAOModel(). It may - also use an xml file used to tune the behavior of the tracker and an init file - used to compute the pose at the very first image. - - The following code shows the simplest way to use the tracker. - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ - vpMbEdgeTracker tracker; // Create a model based tracker. 
- vpImage I; - vpHomogeneousMatrix cMo; // Pose computed using the tracker. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Edge Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required - // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the - // cube.init file. - tracker.initClick(I, "cube.init"); - - while(true){ - // Acquire a new image - vpDisplay::display(I); - tracker.track(I); // Track the object on this image - tracker.getPose(cMo); // Get the pose - - tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -} -\endcode - - For application with large inter-images displacement, multi-scale tracking - is also possible, by setting the number of scales used and by activating (or - not) them using a vector of booleans, as presented in the following code: - -\code - ... - vpHomogeneousMatrix cMo; // Pose computed using the tracker. - vpCameraParameters cam; - - std::vector< bool > scales(3); //Three scales used - scales.push_back(true); //First scale : active - scales.push_back(false); //Second scale (/2) : not active - scales.push_back(true); //Third scale (/4) : active - tracker.setScales(scales); // Set active scales for multi-scale tracking - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - ... 
-\endcode - - The tracker can also be used without display, in that case the initial pose - must be known (object always at the same initial pose for example) or - computed using another method: - -\code -#include -#include -#include -#include -#include - -int main() -{ - vpMbEdgeTracker tracker; // Create a model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. - - //acquire an image - vpImageIo::read(I, "cube.pgm"); // Example of acquisition - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - tracker.initFromPose(I, cMo); // initialize the tracker with the given pose. - - while(true){ - // acquire a new image - tracker.track(I); // track the object on this image - tracker.getPose(cMo); // get the pose - } - - return 0; -} -\endcode - - Finally it can be used not to track an object but just to display a model at - a given pose: - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ - vpMbEdgeTracker tracker; // Create a model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used to display the model. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Edge Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - // load the 3d model, to read .wrl model coin is required, if coin is not installed - // .cao file can be used. 
- tracker.loadModel("cube.cao"); - - while(true){ - // acquire a new image - // Get the pose using any method - vpDisplay::display(I); - tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -} -\endcode - -*/ - + * \class vpMbEdgeTracker + * \ingroup group_mbt_trackers + * \brief Make the complete tracking of an object by using its CAD model. + * \warning This class is deprecated for user usage. You should rather use the high level + * vpMbGenericTracker class. + * + * This class allows to track an object or a scene given its 3D model. A + * video can be found on YouTube \e https://www.youtube.com/watch?v=UK10KMMJFCI + * The \ref tutorial-tracking-mb-deprecated is also a good starting point to use this class. + * + * The tracker requires the knowledge of the 3D model that could be provided in + * a vrml or in a cao file. The cao format is described in loadCAOModel(). It may + * also use an xml file used to tune the behavior of the tracker and an init file + * used to compute the pose at the very first image. + * + * The following code shows the simplest way to use the tracker. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpMbEdgeTracker tracker; // Create a model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose computed using the tracker. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Edge Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * tracker.loadModel("cube.cao"); // Load the 3d model in cao format. 
No 3rd party library is required + * // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the + * // cube.init file. + * tracker.initClick(I, "cube.init"); + * + * while(true){ + * // Acquire a new image + * vpDisplay::display(I); + * tracker.track(I); // Track the object on this image + * tracker.getPose(cMo); // Get the pose + * + * tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. + * vpDisplay::flush(I); + * } + * + * return 0; + * } + * \endcode + * + * For application with large inter-images displacement, multi-scale tracking + * is also possible, by setting the number of scales used and by activating (or + * not) them using a vector of booleans, as presented in the following code: + * + * \code + * ... + * vpHomogeneousMatrix cMo; // Pose computed using the tracker. + * vpCameraParameters cam; + * + * std::vector< bool > scales(3); //Three scales used + * scales.push_back(true); //First scale : active + * scales.push_back(false); //Second scale (/2) : not active + * scales.push_back(true); //Third scale (/4) : active + * tracker.setScales(scales); // Set active scales for multi-scale tracking + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * ... + * \endcode + * + * The tracker can also be used without display, in that case the initial pose + * must be known (object always at the same initial pose for example) or + * computed using another method: + * + * \code + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpMbEdgeTracker tracker; // Create a model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. 
+ * + * //acquire an image + * vpImageIo::read(I, "cube.pgm"); // Example of acquisition + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * tracker.initFromPose(I, cMo); // initialize the tracker with the given pose. + * + * while(true){ + * // acquire a new image + * tracker.track(I); // track the object on this image + * tracker.getPose(cMo); // get the pose + * } + * + * return 0; + * } + * \endcode + * + * Finally it can be used not to track an object but just to display a model at + * a given pose: + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpMbEdgeTracker tracker; // Create a model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used to display the model. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Edge Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * // load the 3d model, to read .wrl model coin is required, if coin is not installed + * // .cao file can be used. + * tracker.loadModel("cube.cao"); + * + * while(true){ + * // acquire a new image + * // Get the pose using any method + * vpDisplay::display(I); + * tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. 
+ * vpDisplay::flush(I);
+ * }
+ *
+ * return 0;
+ * }
+ * \endcode
+ */
 class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker
 {
 protected:
@@ -314,15 +310,15 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker
 
 public:
   vpMbEdgeTracker();
-  virtual ~vpMbEdgeTracker();
+  virtual ~vpMbEdgeTracker() override;
 
   /** @name Inherited functionalities from vpMbEdgeTracker */
   //@{
 
   virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
-                       const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false);
+                       const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override;
   virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
-                       const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false);
+                       const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override;
 
   void getLline(std::list &linesList, unsigned int level = 0) const;
   void getLcircle(std::list &circlesList, unsigned int level = 0) const;
@@ -331,57 +327,57 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker
 
   virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height,
                                            const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
-                                           bool displayFullModel = false);
+                                           bool displayFullModel = false) override;
 
   /*!
-    Get the moving edge parameters.
-
-    \param p_me [out] : an instance of the moving edge parameters used by the
-    tracker.
-  */
+   * Get the moving edge parameters.
+   *
+   * \param p_me [out] : an instance of the moving edge parameters used by the
+   * tracker.
+   */
   virtual inline void getMovingEdge(vpMe &p_me) const { p_me = this->me; }
 
   /*!
-    Get the moving edge parameters.
-
-    \return an instance of the moving edge parameters used by the tracker.
+ */ virtual inline vpMe getMovingEdge() const { return this->me; } virtual unsigned int getNbPoints(unsigned int level = 0) const; /*! - Return the scales levels used for the tracking. - - \return The scales levels used for the tracking. - */ + * Return the scales levels used for the tracking. + * + * \return The scales levels used for the tracking. + */ std::vector getScales() const { return scales; } /*! - \return The threshold value between 0 and 1 over good moving edges ratio. - It allows to decide if the tracker has enough valid moving edges to - compute a pose. 1 means that all moving edges should be considered as - good to have a valid pose, while 0.1 means that 10% of the moving edge - are enough to declare a pose valid. - - \sa setGoodMovingEdgesRatioThreshold() + * \return The threshold value between 0 and 1 over good moving edges ratio. + * It allows to decide if the tracker has enough valid moving edges to + * compute a pose. 1 means that all moving edges should be considered as + * good to have a valid pose, while 0.1 means that 10% of the moving edge + * are enough to declare a pose valid. + * + * \sa setGoodMovingEdgesRatioThreshold() */ inline double getGoodMovingEdgesRatioThreshold() const { return percentageGdPt; } - virtual inline vpColVector getError() const { return m_error_edge; } + virtual inline vpColVector getError() const override { return m_error_edge; } - virtual inline vpColVector getRobustWeights() const { return m_w_edge; } + virtual inline vpColVector getRobustWeights() const override { return m_w_edge; } - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); - void resetTracker(); - - /*! - Set the camera parameters. 
+ void resetTracker() override; - \param cam : The new camera parameters. + /*! + * Set the camera parameters. + * + * \param cam : The new camera parameters. */ - virtual void setCameraParameters(const vpCameraParameters &cam) + virtual void setCameraParameters(const vpCameraParameters &cam) override { m_cam = cam; @@ -403,21 +399,21 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker } } - virtual void setClipping(const unsigned int &flags); + virtual void setClipping(const unsigned int &flags) override; - virtual void setFarClippingDistance(const double &dist); + virtual void setFarClippingDistance(const double &dist) override; - virtual void setNearClippingDistance(const double &dist); + virtual void setNearClippingDistance(const double &dist) override; /*! - Use Ogre3D for visibility tests - - \warning This function has to be called before the initialization of the - tracker. - - \param v : True to use it, False otherwise - */ - virtual void setOgreVisibilityTest(const bool &v) + * Use Ogre3D for visibility tests + * + * \warning This function has to be called before the initialization of the + * tracker. + * + * \param v : True to use it, False otherwise + */ + virtual void setOgreVisibilityTest(const bool &v) override { vpMbTracker::setOgreVisibilityTest(v); #ifdef VISP_HAVE_OGRE @@ -426,11 +422,11 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker } /*! - Use Scanline algorithm for visibility tests - - \param v : True to use it, False otherwise - */ - virtual void setScanLineVisibilityTest(const bool &v) + * Use Scanline algorithm for visibility tests + * + * \param v : True to use it, False otherwise + */ + virtual void setScanLineVisibilityTest(const bool &v) override { vpMbTracker::setScanLineVisibilityTest(v); @@ -444,31 +440,31 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker } /*! - Set the threshold value between 0 and 1 over good moving edges ratio. 
It - allows to decide if the tracker has enough valid moving edges to compute - a pose. 1 means that all moving edges should be considered as good to - have a valid pose, while 0.1 means that 10% of the moving edge are enough - to declare a pose valid. - - \param threshold : Value between 0 and 1 that corresponds to the ratio of - good moving edges that is necessary to consider that the estimated pose - is valid. Default value is 0.4. - - \sa getGoodMovingEdgesRatioThreshold() + * Set the threshold value between 0 and 1 over good moving edges ratio. It + * allows to decide if the tracker has enough valid moving edges to compute + * a pose. 1 means that all moving edges should be considered as good to + * have a valid pose, while 0.1 means that 10% of the moving edge are enough + * to declare a pose valid. + * + * \param threshold : Value between 0 and 1 that corresponds to the ratio of + * good moving edges that is necessary to consider that the estimated pose + * is valid. Default value is 0.4. 
+ * + * \sa getGoodMovingEdgesRatioThreshold() */ void setGoodMovingEdgesRatioThreshold(double threshold) { percentageGdPt = threshold; } void setMovingEdge(const vpMe &me); - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; void setScales(const std::vector &_scales); void setUseEdgeTracking(const std::string &name, const bool &useEdgeTracking); - virtual void track(const vpImage &I); - virtual void track(const vpImage &I); + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I) override; //@} protected: @@ -487,8 +483,8 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker void computeVVSFirstPhase(const vpImage &I, unsigned int iter, double &count, unsigned int lvl = 0); void computeVVSFirstPhaseFactor(const vpImage &I, unsigned int lvl = 0); void computeVVSFirstPhasePoseEstimation(unsigned int iter, bool &isoJoIdentity); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; virtual void computeVVSInteractionMatrixAndResidu(const vpImage &I); virtual void computeVVSWeights(); using vpMbTracker::computeVVSWeights; @@ -497,13 +493,13 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker void displayFeaturesOnImage(const vpImage &I); void downScale(const unsigned int _scale); virtual std::vector > getFeaturesForDisplayEdge(); - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; 
virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); - virtual void initFaceFromCorners(vpMbtPolygon &polygon); - virtual void initFaceFromLines(vpMbtPolygon &polygon); + const std::string &name = "") override; + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; unsigned int initMbtTracking(unsigned int &nberrors_lines, unsigned int &nberrors_cylinders, unsigned int &nberrors_circles); void initMovingEdge(const vpImage &I, const vpHomogeneousMatrix &_cMo); @@ -514,7 +510,7 @@ class VISP_EXPORT vpMbEdgeTracker : public virtual vpMbTracker void removeCylinder(const std::string &name); void removeLine(const std::string &name); void resetMovingEdge(); - virtual void testTracking(); + virtual void testTracking() override; void trackMovingEdge(const vpImage &I); void updateMovingEdge(const vpImage &I); void updateMovingEdgeWeights(); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index c8a40c05dd..fd8e63dc8d 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -214,17 +214,17 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker explicit vpMbGenericTracker(const std::vector &trackerTypes); vpMbGenericTracker(const std::vector &cameraNames, const std::vector &trackerTypes); - virtual ~vpMbGenericTracker(); + virtual ~vpMbGenericTracker() override; virtual double computeCurrentProjectionError(const vpImage &I, const vpHomogeneousMatrix &_cMo, - const vpCameraParameters &_cam); + const vpCameraParameters &_cam) override; virtual double computeCurrentProjectionError(const vpImage &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam); virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters 
&cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I1, const vpImage &I2, const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo, const vpCameraParameters &cam1, @@ -246,7 +246,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual std::vector getCameraNames() const; using vpMbTracker::getCameraParameters; - virtual void getCameraParameters(vpCameraParameters &camera) const; + virtual void getCameraParameters(vpCameraParameters &camera) const override; virtual void getCameraParameters(vpCameraParameters &cam1, vpCameraParameters &cam2) const; virtual void getCameraParameters(std::map &mapOfCameraParameters) const; @@ -256,9 +256,9 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void getClipping(unsigned int &clippingFlag1, unsigned int &clippingFlag2) const; virtual void getClipping(std::map &mapOfClippingFlags) const; - virtual inline vpColVector getError() const { return m_error; } + virtual inline vpColVector getError() const override { return m_error; } - virtual vpMbHiddenFaces &getFaces(); + virtual vpMbHiddenFaces &getFaces() override; virtual vpMbHiddenFaces &getFaces(const std::string &cameraName); #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) @@ -303,7 +303,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool 
displayFullModel = false) override; virtual void getModelForDisplay(std::map > > &mapOfModels, const std::map &mapOfwidths, const std::map &mapOfheights, @@ -337,30 +337,30 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual unsigned int getNbPoints(unsigned int level = 0) const; virtual void getNbPoints(std::map &mapOfNbPoints, unsigned int level = 0) const; - virtual unsigned int getNbPolygon() const; + virtual unsigned int getNbPolygon() const override; virtual void getNbPolygon(std::map &mapOfNbPolygons) const; - virtual vpMbtPolygon *getPolygon(unsigned int index); + virtual vpMbtPolygon *getPolygon(unsigned int index) override; virtual vpMbtPolygon *getPolygon(const std::string &cameraName, unsigned int index); virtual std::pair, std::vector > > - getPolygonFaces(bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false); + getPolygonFaces(bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false) override; virtual void getPolygonFaces(std::map > &mapOfPolygons, std::map > > &mapOfPoints, bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false); using vpMbTracker::getPose; - virtual void getPose(vpHomogeneousMatrix &cMo) const; + virtual void getPose(vpHomogeneousMatrix &cMo) const override; virtual void getPose(vpHomogeneousMatrix &c1Mo, vpHomogeneousMatrix &c2Mo) const; virtual void getPose(std::map &mapOfCameraPoses) const; virtual std::string getReferenceCameraName() const; - virtual inline vpColVector getRobustWeights() const { return m_w; } + virtual inline vpColVector getRobustWeights() const override { return m_w; } virtual int getTrackerType() const; - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; #ifdef VISP_HAVE_MODULE_GUI using vpMbTracker::initClick; @@ -395,7 +395,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker const std::map &mapOfInitPoints); using vpMbTracker::initFromPose; - virtual void initFromPose(const 
vpImage &I, const vpHomogeneousMatrix &cMo); + virtual void initFromPose(const vpImage &I, const vpHomogeneousMatrix &cMo) override; virtual void initFromPose(const vpImage &I1, const vpImage &I2, const std::string &initFile1, const std::string &initFile2); virtual void initFromPose(const vpImage &I_color1, const vpImage &I_color2, @@ -416,7 +416,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void initFromPose(const std::map *> &mapOfColorImages, const std::map &mapOfCameraPoses); - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void loadConfigFile(const std::string &configFile1, const std::string &configFile2, bool verbose = true); virtual void loadConfigFile(const std::map &mapOfConfigFiles, bool verbose = true); @@ -425,7 +425,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker #endif virtual void loadModel(const std::string &modelFile, bool verbose = false, - const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); + const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; virtual void loadModel(const std::string &modelFile1, const std::string &modelFile2, bool verbose = false, const vpHomogeneousMatrix &T1 = vpHomogeneousMatrix(), const vpHomogeneousMatrix &T2 = vpHomogeneousMatrix()); @@ -461,17 +461,17 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker const std::map &mapOfCameraPoses, bool verbose = false, const std::map &mapOfT = std::map()); - virtual void resetTracker(); + virtual void resetTracker() override; - virtual void setAngleAppear(const double &a); + virtual void setAngleAppear(const double &a) override; virtual void setAngleAppear(const double &a1, const double &a2); virtual void setAngleAppear(const std::map &mapOfAngles); - virtual void setAngleDisappear(const double &a); + virtual void setAngleDisappear(const double &a) override; virtual void 
setAngleDisappear(const double &a1, const double &a2); virtual void setAngleDisappear(const std::map &mapOfAngles); - virtual void setCameraParameters(const vpCameraParameters &camera); + virtual void setCameraParameters(const vpCameraParameters &camera) override; virtual void setCameraParameters(const vpCameraParameters &camera1, const vpCameraParameters &camera2); virtual void setCameraParameters(const std::map &mapOfCameraParameters); @@ -480,7 +480,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setCameraTransformationMatrix(const std::map &mapOfTransformationMatrix); - virtual void setClipping(const unsigned int &flags); + virtual void setClipping(const unsigned int &flags) override; virtual void setClipping(const unsigned int &flags1, const unsigned int &flags2); virtual void setClipping(const std::map &mapOfClippingFlags); @@ -497,9 +497,9 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setDepthNormalPclPlaneEstimationRansacThreshold(double threshold); virtual void setDepthNormalSamplingStep(unsigned int stepX, unsigned int stepY); - virtual void setDisplayFeatures(bool displayF); + virtual void setDisplayFeatures(bool displayF) override; - virtual void setFarClippingDistance(const double &dist); + virtual void setFarClippingDistance(const double &dist) override; virtual void setFarClippingDistance(const double &dist1, const double &dist2); virtual void setFarClippingDistance(const std::map &mapOfClippingDists); @@ -524,28 +524,28 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setKltThresholdAcceptation(double th); #endif - virtual void setLod(bool useLod, const std::string &name = ""); + virtual void setLod(bool useLod, const std::string &name = "") override; - virtual void setMask(const vpImage &mask); + virtual void setMask(const vpImage &mask) override; - virtual void setMinLineLengthThresh(double minLineLengthThresh, const std::string &name = ""); - virtual void 
setMinPolygonAreaThresh(double minPolygonAreaThresh, const std::string &name = ""); + virtual void setMinLineLengthThresh(double minLineLengthThresh, const std::string &name = "") override; + virtual void setMinPolygonAreaThresh(double minPolygonAreaThresh, const std::string &name = "") override; virtual void setMovingEdge(const vpMe &me); virtual void setMovingEdge(const vpMe &me1, const vpMe &me2); virtual void setMovingEdge(const std::map &mapOfMe); - virtual void setNearClippingDistance(const double &dist); + virtual void setNearClippingDistance(const double &dist) override; virtual void setNearClippingDistance(const double &dist1, const double &dist2); virtual void setNearClippingDistance(const std::map &mapOfDists); - virtual void setOgreShowConfigDialog(bool showConfigDialog); - virtual void setOgreVisibilityTest(const bool &v); + virtual void setOgreShowConfigDialog(bool showConfigDialog) override; + virtual void setOgreVisibilityTest(const bool &v) override; - virtual void setOptimizationMethod(const vpMbtOptimizationMethod &opt); + virtual void setOptimizationMethod(const vpMbtOptimizationMethod &opt) override; - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; virtual void setPose(const vpImage &I1, const vpImage &I2, const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo); @@ -557,15 +557,15 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setPose(const std::map *> &mapOfColorImages, const std::map &mapOfCameraPoses); - virtual void setProjectionErrorComputation(const bool &flag); + virtual void setProjectionErrorComputation(const bool &flag) override; - virtual void setProjectionErrorDisplay(bool display); - virtual void 
setProjectionErrorDisplayArrowLength(unsigned int length); - virtual void setProjectionErrorDisplayArrowThickness(unsigned int thickness); + virtual void setProjectionErrorDisplay(bool display) override; + virtual void setProjectionErrorDisplayArrowLength(unsigned int length) override; + virtual void setProjectionErrorDisplayArrowThickness(unsigned int thickness) override; virtual void setReferenceCameraName(const std::string &referenceCameraName); - virtual void setScanLineVisibilityTest(const bool &v); + virtual void setScanLineVisibilityTest(const bool &v) override; virtual void setTrackerType(int type); virtual void setTrackerType(const std::map &mapOfTrackerTypes); @@ -577,10 +577,10 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void setUseKltTracking(const std::string &name, const bool &useKltTracking); #endif - virtual void testTracking(); + virtual void testTracking() override; - virtual void track(const vpImage &I); - virtual void track(const vpImage &I_color); + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I_color) override; virtual void track(const vpImage &I1, const vpImage &I2); virtual void track(const vpImage &I_color1, const vpImage &I_color2); @@ -609,23 +609,23 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker virtual void computeVVS(std::map *> &mapOfImages); - virtual void computeVVSInit(); + virtual void computeVVSInit() override; virtual void computeVVSInit(std::map *> &mapOfImages); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInteractionMatrixAndResidu() override; virtual void computeVVSInteractionMatrixAndResidu(std::map *> &mapOfImages, std::map &mapOfVelocityTwist); using vpMbTracker::computeVVSWeights; virtual void computeVVSWeights(); virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void 
initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; - virtual void initFaceFromCorners(vpMbtPolygon &polygon); + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; - virtual void initFaceFromLines(vpMbtPolygon &polygon); + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; virtual void loadConfigFileXML(const std::string &configFile, bool verbose = true); #ifdef VISP_HAVE_NLOHMANN_JSON @@ -670,63 +670,60 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker TrackerWrapper(); explicit TrackerWrapper(int trackerType); - virtual ~TrackerWrapper(); - + virtual inline vpColVector getError() const override { return m_error; } - virtual inline vpColVector getError() const { return m_error; } - - virtual inline vpColVector getRobustWeights() const { return m_w; } + virtual inline vpColVector getRobustWeights() const override { return m_w; } virtual inline int getTrackerType() const { return m_trackerType; } virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual std::vector > getFeaturesForDisplay(); virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual void init(const vpImage &I); + virtual void init(const vpImage &I) override; - virtual void loadConfigFile(const 
std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false, - const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); + const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; virtual void reInitModel(const vpImage &I_color, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); - virtual void resetTracker(); + virtual void resetTracker() override; - virtual void setCameraParameters(const vpCameraParameters &camera); + virtual void setCameraParameters(const vpCameraParameters &camera) override; - virtual void setClipping(const unsigned int &flags); + virtual void setClipping(const unsigned int &flags) override; - virtual void setFarClippingDistance(const double &dist); + virtual void setFarClippingDistance(const double &dist) override; - virtual void setNearClippingDistance(const double &dist); + virtual void setNearClippingDistance(const double &dist) override; - virtual void setOgreVisibilityTest(const bool &v); + virtual void setOgreVisibilityTest(const bool &v) override; - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; - virtual void setProjectionErrorComputation(const bool &flag); + virtual void setProjectionErrorComputation(const bool &flag) override; - virtual void setScanLineVisibilityTest(const bool &v); + virtual void setScanLineVisibilityTest(const bool &v) override; virtual void setTrackerType(int type); - virtual void testTracking(); + virtual void testTracking() override; - virtual void 
track(const vpImage &I); - virtual void track(const vpImage &I_color); + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I_color) override; #ifdef VISP_HAVE_PCL // Fix error: using declaration ‘using vpMbDepthDenseTracker::setPose’ conflicts with a previous // using declaration that occurs with g++ 4.6.3 on Ubuntu 12.04 @@ -741,22 +738,22 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker protected: virtual void computeVVS(const vpImage *const ptr_I); - virtual void computeVVSInit(); + virtual void computeVVSInit() override; virtual void computeVVSInit(const vpImage *const ptr_I); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInteractionMatrixAndResidu() override; using vpMbEdgeTracker::computeVVSInteractionMatrixAndResidu; virtual void computeVVSInteractionMatrixAndResidu(const vpImage *const ptr_I); using vpMbTracker::computeVVSWeights; - virtual void computeVVSWeights(); + virtual void computeVVSWeights() override; virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace = 0, - const std::string &name = ""); + const std::string &name = "") override; - virtual void initFaceFromCorners(vpMbtPolygon &polygon); - virtual void initFaceFromLines(vpMbtPolygon &polygon); + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; virtual void initMbtTracking(const vpImage *const ptr_I); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h index 504356f31a..b5a5cea431 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h @@ -251,7 +251,7 @@ template class 
vpMbHiddenFaces Ogre rendering options) when Ogre visibility is enabled. By default, this functionality is turned off. */ - inline void setOgreShowConfigDialog(bool showConfigDialog) { ogreShowConfigDialog = showConfigDialog; } + inline void setOgreShowConfigDialog(bool showConfigDialog) override { ogreShowConfigDialog = showConfigDialog; } #endif unsigned int setVisible(unsigned int width, unsigned int height, const vpCameraParameters &cam, @@ -331,9 +331,9 @@ template vpMbHiddenFaces::vpMbHiddenFaces(const vpMbHiddenFaces ©) : Lpol(), nbVisiblePolygon(copy.nbVisiblePolygon), scanlineRender(copy.scanlineRender) #ifdef VISP_HAVE_OGRE - , - ogreBackground(copy.ogreBackground), ogreInitialised(copy.ogreInitialised), nbRayAttempts(copy.nbRayAttempts), - ratioVisibleRay(copy.ratioVisibleRay), ogre(NULL), lOgrePolygons(), ogreShowConfigDialog(copy.ogreShowConfigDialog) + , + ogreBackground(copy.ogreBackground), ogreInitialised(copy.ogreInitialised), nbRayAttempts(copy.nbRayAttempts), + ratioVisibleRay(copy.ratioVisibleRay), ogre(NULL), lOgrePolygons(), ogreShowConfigDialog(copy.ogreShowConfigDialog) #endif { // Copy the list of polygons @@ -594,7 +594,7 @@ bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix & if (useOgre) #ifdef VISP_HAVE_OGRE testDisappear = - ((!Lpol[i]->isVisible(cMo, angleDisappears, true, cam, width, height)) || !isVisibleOgre(cameraPos, i)); + ((!Lpol[i]->isVisible(cMo, angleDisappears, true, cam, width, height)) || !isVisibleOgre(cameraPos, i)); #else { (void)cameraPos; // Avoid warning @@ -611,21 +611,23 @@ bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix & // std::endl; changed = true; Lpol[i]->isvisible = false; - } else { - // nbVisiblePolygon++; + } + else { + // nbVisiblePolygon++; Lpol[i]->isvisible = true; // if(nbCornerInsidePrev > Lpol[i]->getNbCornerInsidePrevImage()) // changed = true; } - } else { + } + else { bool testAppear = true; if (testAppear) { if (useOgre) #ifdef VISP_HAVE_OGRE testAppear = - 
((Lpol[i]->isVisible(cMo, angleAppears, true, cam, width, height)) && isVisibleOgre(cameraPos, i)); + ((Lpol[i]->isVisible(cMo, angleAppears, true, cam, width, height)) && isVisibleOgre(cameraPos, i)); #else testAppear = (Lpol[i]->isVisible(cMo, angleAppears, false, cam, width, height)); #endif @@ -638,8 +640,9 @@ bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix & Lpol[i]->isvisible = true; changed = true; // nbVisiblePolygon++; - } else { - // std::cout << "Problem" << std::endl; + } + else { + // std::cout << "Problem" << std::endl; Lpol[i]->isvisible = false; } } @@ -755,7 +758,8 @@ template void vpMbHiddenFaces::displayOgre(cons for (unsigned int i = 0; i < Lpol.size(); i++) { if (Lpol[i]->isVisible()) { lOgrePolygons[i]->setVisible(true); - } else + } + else lOgrePolygons[i]->setVisible(false); } ogre->display(ogreBackground, cMo); @@ -883,7 +887,8 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came if (it != result.end()) { if (it->movable->getName() == Ogre::StringConverter::toString(index)) { nbVisible++; - } else { + } + else { distance = it->distance; // Cannot use epsilon for comparison as ray length is slightly // different from the collision distance returned by @@ -892,9 +897,11 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came 1e-6 /*std::fabs(distance) * std::numeric_limits::epsilon()*/) nbVisible++; } - } else + } + else nbVisible++; // Collision not detected but present. 
- } else { + } + else { if (it != result.end()) { distance = it->distance; double distancePrev = distance; @@ -904,7 +911,8 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came if (it->movable->getName() == Ogre::StringConverter::toString(index)) { nbVisible++; - } else { + } + else { ++it; while (it != result.end()) { distance = it->distance; @@ -918,7 +926,8 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came } ++it; distancePrev = distance; - } else + } + else break; } } @@ -938,7 +947,8 @@ bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &came if (visible) { lOgrePolygons[index]->setVisible(true); Lpol[index]->isvisible = true; - } else { + } + else { lOgrePolygons[index]->setVisible(false); Lpol[index]->isvisible = false; } diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h index 0746228d76..87b99217b2 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h @@ -258,9 +258,9 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker void addCircle(const vpPoint &P1, const vpPoint &P2, const vpPoint &P3, double r, const std::string &name = ""); virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; virtual void display(const vpImage &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false); + const vpColor &col, unsigned int thickness = 1, bool displayFullModel = false) override; /*! Return the address of the circle feature list. 
*/ virtual std::list &getFeaturesCircle() { return circles_disp; } @@ -309,22 +309,22 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker */ inline double getKltThresholdAcceptation() const { return threshold_outlier; } - virtual inline vpColVector getError() const { return m_error_klt; } + virtual inline vpColVector getError() const override { return m_error_klt; } - virtual inline vpColVector getRobustWeights() const { return m_w_klt; } + virtual inline vpColVector getRobustWeights() const override { return m_w_klt; } virtual std::vector > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, - bool displayFullModel = false); + bool displayFullModel = false) override; - virtual void loadConfigFile(const std::string &configFile, bool verbose = true); + virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, - bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); - void resetTracker(); + bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; + void resetTracker() override; - void setCameraParameters(const vpCameraParameters &cam); + void setCameraParameters(const vpCameraParameters &cam) override; /*! Set the erosion of the mask used on the Model faces. 
@@ -355,7 +355,7 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker \param v : True to use it, False otherwise */ - virtual void setOgreVisibilityTest(const bool &v) + virtual void setOgreVisibilityTest(const bool &v) override { vpMbTracker::setOgreVisibilityTest(v); #ifdef VISP_HAVE_OGRE @@ -368,7 +368,7 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker \param v : True to use it, False otherwise */ - virtual void setScanLineVisibilityTest(const bool &v) + virtual void setScanLineVisibilityTest(const bool &v) override { vpMbTracker::setScanLineVisibilityTest(v); @@ -376,8 +376,8 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker (*it)->useScanLine = v; } - virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo); - virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo); + virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; + virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; /*! Set if the projection error criteria has to be computed. @@ -385,19 +385,19 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker \param flag : True if the projection error criteria has to be computed, false otherwise */ - virtual void setProjectionErrorComputation(const bool &flag) + virtual void setProjectionErrorComputation(const bool &flag) override { if (flag) std::cerr << "This option is not yet implemented in vpMbKltTracker, " - "projection error computation set to false." - << std::endl; + "projection error computation set to false." + << std::endl; } void setUseKltTracking(const std::string &name, const bool &useKltTracking); - virtual void testTracking(); - virtual void track(const vpImage &I); - virtual void track(const vpImage &I_color); + virtual void testTracking() override; + virtual void track(const vpImage &I) override; + virtual void track(const vpImage &I_color) override; /*! 
@name Deprecated functions @@ -453,16 +453,16 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker /** @name Protected Member Functions Inherited from vpMbKltTracker */ //@{ void computeVVS(); - virtual void computeVVSInit(); - virtual void computeVVSInteractionMatrixAndResidu(); + virtual void computeVVSInit() override; + virtual void computeVVSInteractionMatrixAndResidu() override; virtual std::vector > getFeaturesForDisplayKlt(); - virtual void init(const vpImage &I); - virtual void initFaceFromCorners(vpMbtPolygon &polygon); - virtual void initFaceFromLines(vpMbtPolygon &polygon); - virtual void initCircle(const vpPoint &, const vpPoint &, const vpPoint &, double, int, const std::string &name = ""); - virtual void initCylinder(const vpPoint &, const vpPoint &, double, int, const std::string &name = ""); + virtual void init(const vpImage &I) override; + virtual void initFaceFromCorners(vpMbtPolygon &polygon) override; + virtual void initFaceFromLines(vpMbtPolygon &polygon) override; + virtual void initCircle(const vpPoint &, const vpPoint &, const vpPoint &, double, int, const std::string &name = "") override; + virtual void initCylinder(const vpPoint &, const vpPoint &, double, int, const std::string &name = "") override; void preTracking(const vpImage &I); bool postTracking(const vpImage &I, vpColVector &w); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h index 36a12ac67a..32949e7c1b 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbTracker.h @@ -268,8 +268,8 @@ class VISP_EXPORT vpMbTracker // vpTRACE("Warning : The covariance matrix has not been computed. // See setCovarianceComputation() to do it."); std::cerr << "Warning : The covariance matrix has not been computed. " - "See setCovarianceComputation() to do it." - << std::endl; + "See setCovarianceComputation() to do it." 
+ << std::endl; } return covarianceMatrix; @@ -402,15 +402,15 @@ class VISP_EXPORT vpMbTracker } virtual std::pair, std::vector > > - getPolygonFaces(bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false); + getPolygonFaces(bool orderPolygons = true, bool useVisibility = true, bool clipPolygon = false); - /*! - Get the current pose between the object and the camera. - cMo is the matrix which can be used to express - coordinates from the object frame to camera frame. + /*! + Get the current pose between the object and the camera. + cMo is the matrix which can be used to express + coordinates from the object frame to camera frame. - \param cMo : the pose - */ + \param cMo : the pose + */ virtual inline void getPose(vpHomogeneousMatrix &cMo) const { cMo = m_cMo; } /*! @@ -551,10 +551,10 @@ class VISP_EXPORT vpMbTracker virtual void setNearClippingDistance(const double &dist); /*! - Set the optimization method used during the tracking. - - \param opt : Optimization method to use. - */ + * Set the optimization method used during the tracking. + * + * \param opt : Optimization method to use. + */ virtual inline void setOptimizationMethod(const vpMbtOptimizationMethod &opt) { m_optimizationMethod = opt; } void setProjectionErrorMovingEdge(const vpMe &me); @@ -585,18 +585,21 @@ class VISP_EXPORT vpMbTracker virtual void setProjectionErrorComputation(const bool &flag) { computeProjError = flag; } /*! - Display or not gradient and model orientation when computing the projection error. - */ + * Display or not gradient and model orientation when computing the projection error. + */ virtual void setProjectionErrorDisplay(bool display) { m_projectionErrorDisplay = display; } /*! - Arrow length used to display gradient and model orientation for projection error computation. 
- */ - virtual void setProjectionErrorDisplayArrowLength(unsigned int length) { m_projectionErrorDisplayLength = length; } + * Arrow length used to display gradient and model orientation for projection error computation. + */ + virtual void setProjectionErrorDisplayArrowLength(unsigned int length) + { + m_projectionErrorDisplayLength = length; + } /*! - Arrow thickness used to display gradient and model orientation for projection error computation. - */ + * Arrow thickness used to display gradient and model orientation for projection error computation. + */ virtual void setProjectionErrorDisplayArrowThickness(unsigned int thickness) { m_projectionErrorDisplayThickness = thickness; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h index 3ff14f3be1..2a712dc9d4 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeEllipse.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,13 +29,12 @@ * * Description: * Moving edges. - * -*****************************************************************************/ + */ /*! - \file vpMbtMeEllipse.h - \brief Moving edges on an ellipse -*/ + * \file vpMbtMeEllipse.h + * \brief Moving edges on an ellipse + */ #ifndef vpMbtMeEllipse_HH #define vpMbtMeEllipse_HH @@ -49,12 +47,12 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - \class vpMbtMeEllipse - \ingroup group_mbt_features - - \brief Class that tracks an ellipse moving edges with specific capabilities for - model-based tracking. -*/ + * \class vpMbtMeEllipse + * \ingroup group_mbt_features + * + * \brief Class that tracks an ellipse moving edges with specific capabilities for + * model-based tracking. 
+ */ class VISP_EXPORT vpMbtMeEllipse : public vpMeEllipse { public: @@ -62,7 +60,6 @@ class VISP_EXPORT vpMbtMeEllipse : public vpMeEllipse vpMbtMeEllipse(); vpMbtMeEllipse(const vpMbtMeEllipse &me_ellipse); - virtual ~vpMbtMeEllipse(); void computeProjectionError(const vpImage &_I, double &_sumErrorRad, unsigned int &_nbFeatures, const vpMatrix &SobelX, const vpMatrix &SobelY, bool display, unsigned int length, @@ -77,7 +74,7 @@ class VISP_EXPORT vpMbtMeEllipse : public vpMeEllipse private: void reSample(const vpImage &I); - void sample(const vpImage &I, bool doNotTrack = false); + void sample(const vpImage &I, bool doNotTrack = false) override; void suppressPoints(); }; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeLine.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeLine.h index 53991fe4c1..9cf88f3cfd 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtMeLine.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtMeLine.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Implementation of a line used by the model-based tracker. - * - * Authors: - * Romain Tallonneau - * -*****************************************************************************/ + */ /*! - \file vpMbtMeLine.h - \brief Implementation of a line used by the model-based tracker. -*/ + * \file vpMbtMeLine.h + * \brief Implementation of a line used by the model-based tracker. + */ #ifndef vpMbtMeLine_HH #define vpMbtMeLine_HH @@ -51,10 +46,9 @@ #ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - \class vpMbtMeLine - \brief Implementation of a line used by the model-based tracker. - \ingroup group_mbt_features - + * \class vpMbtMeLine + * \brief Implementation of a line used by the model-based tracker. 
+ * \ingroup group_mbt_features */ class VISP_EXPORT vpMbtMeLine : public vpMeTracker { @@ -72,7 +66,7 @@ class VISP_EXPORT vpMbtMeLine : public vpMeTracker public: vpMbtMeLine(); - virtual ~vpMbtMeLine(); + virtual ~vpMbtMeLine() override; void computeProjectionError(const vpImage &_I, double &_sumErrorRad, unsigned int &_nbFeatures, const vpMatrix &SobelX, const vpMatrix &SobelY, bool display, unsigned int length, @@ -82,27 +76,27 @@ class VISP_EXPORT vpMbtMeLine : public vpMeTracker using vpMeTracker::display; /*! - Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j - \; sin(\theta) - \rho = 0 \f$ - - \return : The a coefficient of the moving edge - */ + * Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j + * \; sin(\theta) - \rho = 0 \f$ + * + * \return : The a coefficient of the moving edge + */ inline double get_a() const { return this->a; } /*! - Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j - \; sin(\theta) - \rho = 0 \f$ - - \return : The b coefficient of the moving edge - */ + * Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j + * \; sin(\theta) - \rho = 0 \f$ + * + * \return : The b coefficient of the moving edge + */ inline double get_b() const { return this->b; } /*! 
- Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j - \; sin(\theta) - \rho = 0 \f$ - - \return : The c coefficient of the moving edge - */ + * Get the a coefficient of the line corresponding to \f$ i \; cos(\theta) + j + * \; sin(\theta) - \rho = 0 \f$ + * + * \return : The c coefficient of the moving edge + */ inline double get_c() const { return this->c; } void initTracking(const vpImage &I, const vpImagePoint &ip1, const vpImagePoint &ip2, double rho, @@ -117,7 +111,7 @@ class VISP_EXPORT vpMbtMeLine : public vpMeTracker private: void bubbleSortI(); void bubbleSortJ(); - virtual void sample(const vpImage &image, bool doNotTrack = false); + void sample(const vpImage &image, bool doNotTrack = false) override; void seekExtremities(const vpImage &I); void setExtremities(); void suppressPoints(const vpImage &I); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbtPolygon.h b/modules/tracker/mbt/include/visp3/mbt/vpMbtPolygon.h index a1b6810021..7b26bbd01c 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbtPolygon.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbtPolygon.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,12 @@ * * Description: * Implements a polygon of the model used by the model-based tracker. - * - * Authors: - * Romain Tallonneau - * Aurelien Yol - * -*****************************************************************************/ + */ /*! - \file vpMbtPolygon.h - \brief Implements a polygon of the model used by the model-based tracker. -*/ + * \file vpMbtPolygon.h + *\brief Implements a polygon of the model used by the model-based tracker. + */ #ifndef vpMbtPolygon_HH #define vpMbtPolygon_HH @@ -53,13 +47,12 @@ #include /*! - \class vpMbtPolygon - - \brief Implementation of a polygon of the model used by the model-based - tracker. 
- - \ingroup group_mbt_faces - + * \class vpMbtPolygon + * + * \brief Implementation of a polygon of the model used by the model-based + * tracker. + * + * \ingroup group_mbt_faces */ class VISP_EXPORT vpMbtPolygon : public vpPolygon3D { @@ -89,19 +82,18 @@ class VISP_EXPORT vpMbtPolygon : public vpPolygon3D public: vpMbtPolygon(); vpMbtPolygon(const vpMbtPolygon &mbtp); - virtual ~vpMbtPolygon(); /*! - Get the index of the face. - - \return index : the index of the face. - */ + * Get the index of the face. + * + * \return index : the index of the face. + */ inline int getIndex() const { return index; } /*! - Get the name of the face. - - \return Name of the face. + * Get the name of the face. + * + * \return Name of the face. */ inline std::string getName() const { return name; } @@ -115,51 +107,51 @@ class VISP_EXPORT vpMbtPolygon : public vpPolygon3D vpMbtPolygon &operator=(const vpMbtPolygon &mbtp); /*! - Set the index of the face. - - \param i : the new index of the face. - */ + * Set the index of the face. + * + * \param i : the new index of the face. + */ virtual inline void setIndex(int i) { index = i; } // Due to a doxygen warning include the sample code in the doc, we remove // the inline and put the doc in the *.cpp file void setLod(bool use_lod); /*! - Set the threshold for the minimum line length to be considered as visible - in the LOD (level of detail) case. This threshold is only used when - setLoD() is turned on. - - \param min_line_length : threshold for the minimum line length in pixel. - When a single line that doesn't belong to a face is considered by the - tracker, this line is tracked only if its length in pixel is greater than - \e min_line_length. - - \sa setLoD() + * Set the threshold for the minimum line length to be considered as visible + * in the LOD (level of detail) case. This threshold is only used when + * setLoD() is turned on. + * + * \param min_line_length : threshold for the minimum line length in pixel. 
+ * When a single line that doesn't belong to a face is considered by the + * tracker, this line is tracked only if its length in pixel is greater than + * \e min_line_length. + * + * \sa setLoD() */ inline void setMinLineLengthThresh(double min_line_length) { this->minLineLengthThresh = min_line_length; } /*! - Set the minimum polygon area to be considered as visible in the LOD (level - of detail) case. This threshold is only used when setLoD() is turned on. - - \param min_polygon_area : threshold for the minimum polygon area in pixel. - When a face is considered by the tracker, this face is tracked only if its - area in pixel is greater than \e min_polygon_area. - - \sa setLoD() - */ + * Set the minimum polygon area to be considered as visible in the LOD (level + * of detail) case. This threshold is only used when setLoD() is turned on. + * + * \param min_polygon_area : threshold for the minimum polygon area in pixel. + * When a face is considered by the tracker, this face is tracked only if its + * area in pixel is greater than \e min_polygon_area. + * + * \sa setLoD() + */ inline void setMinPolygonAreaThresh(double min_polygon_area) { this->minPolygonAreaThresh = min_polygon_area; } /*! - Set the name of the face. - - \param face_name : name of the face. + * Set the name of the face. + * + * \param face_name : name of the face. */ inline void setName(const std::string &face_name) { this->name = face_name; } /*! - Set if the polygon is oriented or not. - - \param oriented : True if the polygon is oriented, false otherwise. + * Set if the polygon is oriented or not. + * + * \param oriented : True if the polygon is oriented, false otherwise. 
*/ inline void setIsPolygonOriented(const bool &oriented) { this->hasOrientation = oriented; } }; diff --git a/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp b/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp index 2eb6de1986..e7e81ad3f6 100644 --- a/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp +++ b/modules/tracker/mbt/src/edge/vpMbtMeEllipse.cpp @@ -56,10 +56,6 @@ vpMbtMeEllipse::vpMbtMeEllipse() : vpMeEllipse() { } Copy constructor. */ vpMbtMeEllipse::vpMbtMeEllipse(const vpMbtMeEllipse &me_ellipse) : vpMeEllipse(me_ellipse) { } -/*! - Destructor. -*/ -vpMbtMeEllipse::~vpMbtMeEllipse() { } /*! Compute the projection error of the ellipse. diff --git a/modules/tracker/mbt/src/vpMbGenericTracker.cpp b/modules/tracker/mbt/src/vpMbGenericTracker.cpp index 64489dca7c..71b8ee65b0 100644 --- a/modules/tracker/mbt/src/vpMbGenericTracker.cpp +++ b/modules/tracker/mbt/src/vpMbGenericTracker.cpp @@ -5789,8 +5789,6 @@ vpMbGenericTracker::TrackerWrapper::TrackerWrapper(int trackerType) #endif } -vpMbGenericTracker::TrackerWrapper::~TrackerWrapper() { } - // Implemented only for debugging purposes: use TrackerWrapper as a standalone tracker void vpMbGenericTracker::TrackerWrapper::computeVVS(const vpImage *const ptr_I) { diff --git a/modules/tracker/mbt/src/vpMbTracker.cpp b/modules/tracker/mbt/src/vpMbTracker.cpp index 68b7132512..0aa00ffc5f 100644 --- a/modules/tracker/mbt/src/vpMbTracker.cpp +++ b/modules/tracker/mbt/src/vpMbTracker.cpp @@ -1096,7 +1096,7 @@ void vpMbTracker::initFromPose(const vpImage *const I, const vpIm // The six value of the pose vector 0.0000 // \ 0.0000 // | - 1.0000 // | Exemple of value for the pose vector where Z = 1 meter + 1.0000 // | Example of value for the pose vector where Z = 1 meter 0.0000 // | 0.0000 // | 0.0000 // / @@ -1119,7 +1119,7 @@ void vpMbTracker::initFromPose(const vpImage &I, const std::strin // The six value of the pose vector 0.0000 // \ 0.0000 // | - 1.0000 // | Exemple of value for the pose vector where Z = 1 meter + 
1.0000 // | Example of value for the pose vector where Z = 1 meter 0.0000 // | 0.0000 // | 0.0000 // / diff --git a/modules/tracker/mbt/src/vpMbtPolygon.cpp b/modules/tracker/mbt/src/vpMbtPolygon.cpp index 4ea7c8485c..5f9a2998c1 100644 --- a/modules/tracker/mbt/src/vpMbtPolygon.cpp +++ b/modules/tracker/mbt/src/vpMbtPolygon.cpp @@ -53,14 +53,13 @@ */ vpMbtPolygon::vpMbtPolygon() : index(-1), isvisible(false), isappearing(false), useLod(false), minLineLengthThresh(50.0), - minPolygonAreaThresh(2500.0), name(""), hasOrientation(true) -{ -} + minPolygonAreaThresh(2500.0), name(""), hasOrientation(true) +{ } vpMbtPolygon::vpMbtPolygon(const vpMbtPolygon &mbtp) : vpPolygon3D(mbtp), index(mbtp.index), isvisible(mbtp.isvisible), isappearing(mbtp.isappearing), useLod(mbtp.useLod), - minLineLengthThresh(mbtp.minLineLengthThresh), minPolygonAreaThresh(mbtp.minPolygonAreaThresh), name(mbtp.name), - hasOrientation(mbtp.hasOrientation) + minLineLengthThresh(mbtp.minLineLengthThresh), minPolygonAreaThresh(mbtp.minPolygonAreaThresh), name(mbtp.name), + hasOrientation(mbtp.hasOrientation) { //*this = mbtp; // Should not be called by copy constructor to avoid multiple // assignements. @@ -81,11 +80,6 @@ vpMbtPolygon &vpMbtPolygon::operator=(const vpMbtPolygon &mbtp) return (*this); } -/*! - Basic destructor. -*/ -vpMbtPolygon::~vpMbtPolygon() {} - /*! 
Check if the polygon is visible in the image and if the angle between the normal to the face and the line vector going from the optical center to the @@ -215,9 +209,11 @@ bool vpMbtPolygon::isVisible(const vpHomogeneousMatrix &cMo, double alpha, const if (angle < alpha + vpMath::rad(1)) { isappearing = true; - } else if (modulo && (M_PI - angle) < alpha + vpMath::rad(1)) { + } + else if (modulo && (M_PI - angle) < alpha + vpMath::rad(1)) { isappearing = true; - } else { + } + else { isappearing = false; } diff --git a/modules/tracker/me/include/visp3/me/vpMeEllipse.h b/modules/tracker/me/include/visp3/me/vpMeEllipse.h index a15317dcda..cf60d79183 100644 --- a/modules/tracker/me/include/visp3/me/vpMeEllipse.h +++ b/modules/tracker/me/include/visp3/me/vpMeEllipse.h @@ -98,10 +98,11 @@ class VISP_EXPORT vpMeEllipse : public vpMeTracker * Copy constructor. */ vpMeEllipse(const vpMeEllipse &me_ellipse); + /*! * Destructor. */ - virtual ~vpMeEllipse(); + virtual ~vpMeEllipse() override; /*! * Display the ellipse or arc of ellipse * diff --git a/modules/tracker/me/include/visp3/me/vpMeLine.h b/modules/tracker/me/include/visp3/me/vpMeLine.h index 45bca9be7c..fa03bdc306 100644 --- a/modules/tracker/me/include/visp3/me/vpMeLine.h +++ b/modules/tracker/me/include/visp3/me/vpMeLine.h @@ -181,7 +181,7 @@ class VISP_EXPORT vpMeLine : public vpMeTracker /*! * Destructor. */ - virtual ~vpMeLine(); + virtual ~vpMeLine() override; /*! * Display line. diff --git a/modules/tracker/me/include/visp3/me/vpMeNurbs.h b/modules/tracker/me/include/visp3/me/vpMeNurbs.h index 87c6e9bba9..c6f9d5ec28 100644 --- a/modules/tracker/me/include/visp3/me/vpMeNurbs.h +++ b/modules/tracker/me/include/visp3/me/vpMeNurbs.h @@ -164,11 +164,6 @@ class VISP_EXPORT vpMeNurbs : public vpMeTracker */ vpMeNurbs(const vpMeNurbs &menurbs); - /*! - * Destructor. - */ - virtual ~vpMeNurbs(); - /*! * Sets the number of control points used to compute the Nurbs. 
* diff --git a/modules/tracker/me/include/visp3/me/vpMeTracker.h b/modules/tracker/me/include/visp3/me/vpMeTracker.h index 24df003659..743799af19 100644 --- a/modules/tracker/me/include/visp3/me/vpMeTracker.h +++ b/modules/tracker/me/include/visp3/me/vpMeTracker.h @@ -100,7 +100,7 @@ class VISP_EXPORT vpMeTracker : public vpTracker /*! * Destructor. */ - virtual ~vpMeTracker(); + virtual ~vpMeTracker() override; /** @name Public Member Functions Inherited from vpMeTracker */ //@{ diff --git a/modules/tracker/me/include/visp3/me/vpNurbs.h b/modules/tracker/me/include/visp3/me/vpNurbs.h index 09c7170d5d..ee9fad05f7 100644 --- a/modules/tracker/me/include/visp3/me/vpNurbs.h +++ b/modules/tracker/me/include/visp3/me/vpNurbs.h @@ -167,10 +167,6 @@ class VISP_EXPORT vpNurbs : public vpBSpline */ vpNurbs(const vpNurbs &nurbs); - /*! - * Destructor. - */ - virtual ~vpNurbs(); /*! * Gets all the weights relative to the control points. diff --git a/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp b/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp index 041f51cb5e..d6c28a541b 100644 --- a/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp +++ b/modules/tracker/me/src/moving-edges/vpMeNurbs.cpp @@ -198,8 +198,6 @@ vpMeNurbs::vpMeNurbs(const vpMeNurbs &menurbs) cannyTh2 = menurbs.cannyTh2; } -vpMeNurbs::~vpMeNurbs() { } - void vpMeNurbs::initTracking(const vpImage &I) { std::list ptList; diff --git a/modules/tracker/me/src/moving-edges/vpNurbs.cpp b/modules/tracker/me/src/moving-edges/vpNurbs.cpp index ec9b0ec379..c253f55ea7 100644 --- a/modules/tracker/me/src/moving-edges/vpNurbs.cpp +++ b/modules/tracker/me/src/moving-edges/vpNurbs.cpp @@ -50,8 +50,6 @@ vpNurbs::vpNurbs() : weights() { p = 3; } vpNurbs::vpNurbs(const vpNurbs &nurbs) : vpBSpline(nurbs), weights(nurbs.weights) { } -vpNurbs::~vpNurbs() { } - vpImagePoint vpNurbs::computeCurvePoint(double l_u, unsigned int l_i, unsigned int l_p, std::vector &l_knots, std::vector &l_controlPoints, std::vector &l_weights) { diff 
--git a/modules/tracker/me/test/testNurbs.cpp b/modules/tracker/me/test/testNurbs.cpp index 97dc6af1b6..4eceb74edc 100644 --- a/modules/tracker/me/test/testNurbs.cpp +++ b/modules/tracker/me/test/testNurbs.cpp @@ -28,7 +28,7 @@ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Description: - * Exemple of a Nurbs curve. + * Example of a Nurbs curve. */ /*! \example testNurbs.cpp diff --git a/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerWarpHomographySL3.h b/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerWarpHomographySL3.h index 634901ac68..9a531b5efb 100644 --- a/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerWarpHomographySL3.h +++ b/modules/tracker/tt/include/visp3/tt/vpTemplateTrackerWarpHomographySL3.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,17 +29,13 @@ * * Description: * Template tracker. - * - * Authors: - * Amaury Dame - * Aurelien Yol - * -*****************************************************************************/ + */ + /*! - \file vpTemplateTrackerWarpHomographySL3.h - \brief warping function of an homography: the homography is defined on the - sl3 lie algebra H=exp(Sum(p[i]* A_i)) A_i is the basis of the SL3 Algebra -*/ + *\file vpTemplateTrackerWarpHomographySL3.h + *\brief warping function of an homography: the homography is defined on the + *sl3 lie algebra H=exp(Sum(p[i]* A_i)) A_i is the basis of the SL3 Algebra + */ #ifndef vpTemplateTrackerWarpHomographySL3_hh #define vpTemplateTrackerWarpHomographySL3_hh @@ -51,9 +46,9 @@ #include /*! 
- \class vpTemplateTrackerWarpHomographySL3 - \ingroup group_tt_warp -*/ + * \class vpTemplateTrackerWarpHomographySL3 + * \ingroup group_tt_warp + */ class VISP_EXPORT vpTemplateTrackerWarpHomographySL3 : public vpTemplateTrackerWarp { protected: @@ -63,7 +58,6 @@ class VISP_EXPORT vpTemplateTrackerWarpHomographySL3 : public vpTemplateTrackerW public: vpTemplateTrackerWarpHomographySL3(); - virtual ~vpTemplateTrackerWarpHomographySL3(); void computeCoeff(const vpColVector &p); void computeDenom(vpColVector &X, const vpColVector &); @@ -95,7 +89,7 @@ class VISP_EXPORT vpTemplateTrackerWarpHomographySL3 : public vpTemplateTrackerW void warpX(const int &v1, const int &u1, double &v2, double &u2, const vpColVector &); #ifndef DOXYGEN_SHOULD_SKIP_THIS - void warpXInv(const vpColVector &, vpColVector &, const vpColVector &) {} + void warpXInv(const vpColVector &, vpColVector &, const vpColVector &) { } #endif }; #endif diff --git a/modules/tracker/tt/src/warp/vpTemplateTrackerWarpHomographySL3.cpp b/modules/tracker/tt/src/warp/vpTemplateTrackerWarpHomographySL3.cpp index 9dcf5fdecd..666ee5827b 100644 --- a/modules/tracker/tt/src/warp/vpTemplateTrackerWarpHomographySL3.cpp +++ b/modules/tracker/tt/src/warp/vpTemplateTrackerWarpHomographySL3.cpp @@ -109,8 +109,9 @@ void vpTemplateTrackerWarpHomographySL3::findWarp(const double *ut0, const doubl vpMatrix::computeHLM(H, lambda, HLM); try { dp = HLM.inverseByLU() * G_; - } catch (const vpException &e) { - // std::cout<<"Cannot inverse the matrix by LU "< /*! - \class vpTemplateTrackerMI - \ingroup group_tt_mi_tracker -*/ + * \class vpTemplateTrackerMI + * \ingroup group_tt_mi_tracker + */ class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker { public: /*! Hessian approximation. 
*/ - typedef enum { + typedef enum + { HESSIAN_NONSECOND = -1, HESSIAN_0, HESSIAN_d2I, @@ -120,12 +115,10 @@ class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker void computeMI(double &MI); void computeProba(int &nbpoint); - double getCost(const vpImage &I, const vpColVector &tp); + double getCost(const vpImage &I, const vpColVector &tp) override; double getCost(const vpImage &I) { return getCost(I, p); } double getNormalizedCost(const vpImage &I, const vpColVector &tp); double getNormalizedCost(const vpImage &I) { return getNormalizedCost(I, p); } - virtual void initHessienDesired(const vpImage &I) = 0; - virtual void trackNoPyr(const vpImage &I) = 0; void zeroProbabilities(); // private: @@ -149,18 +142,16 @@ class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker //#endif public: - // constructeur //! Default constructor. vpTemplateTrackerMI() : vpTemplateTracker(), hessianComputation(USE_HESSIEN_NORMAL), ApproxHessian(HESSIAN_0), lambda(0), temp(NULL), - Prt(NULL), dPrt(NULL), Pt(NULL), Pr(NULL), d2Prt(NULL), PrtTout(NULL), dprtemp(NULL), PrtD(NULL), dPrtD(NULL), - influBspline(0), bspline(0), Nc(0), Ncb(0), d2Ix(), d2Iy(), d2Ixy(), MI_preEstimation(0), MI_postEstimation(0), - NMI_preEstimation(0), NMI_postEstimation(0), covarianceMatrix(), computeCovariance(false), m_du(), m_dv(), m_A(), - m_dB(), m_d2u(), m_d2v(), m_dA() - { - } + Prt(NULL), dPrt(NULL), Pt(NULL), Pr(NULL), d2Prt(NULL), PrtTout(NULL), dprtemp(NULL), PrtD(NULL), dPrtD(NULL), + influBspline(0), bspline(0), Nc(0), Ncb(0), d2Ix(), d2Iy(), d2Ixy(), MI_preEstimation(0), MI_postEstimation(0), + NMI_preEstimation(0), NMI_postEstimation(0), covarianceMatrix(), computeCovariance(false), m_du(), m_dv(), m_A(), + m_dB(), m_d2u(), m_d2v(), m_dA() + { } explicit vpTemplateTrackerMI(vpTemplateTrackerWarp *_warp); - virtual ~vpTemplateTrackerMI(); + virtual ~vpTemplateTrackerMI() override; vpMatrix getCovarianceMatrix() const { return covarianceMatrix; } double getMI() const { return 
MI_postEstimation; } double getMI(const vpImage &I, int &nc, const int &bspline, vpColVector &tp); diff --git a/modules/vision/include/visp3/vision/vpHomography.h b/modules/vision/include/visp3/vision/vpHomography.h index 6811dd0c98..5aa9f40b58 100644 --- a/modules/vision/include/visp3/vision/vpHomography.h +++ b/modules/vision/include/visp3/vision/vpHomography.h @@ -183,8 +183,6 @@ class VISP_EXPORT vpHomography : public vpArray2D vpHomography(const vpThetaUVector &tu, const vpTranslationVector &atb, const vpPlane &bP); //! Construction from translation and rotation and a plane. vpHomography(const vpPoseVector &arb, const vpPlane &bP); - //! Destructor. - virtual ~vpHomography() { }; //! Construction from translation and rotation and a plane void buildFrom(const vpRotationMatrix &aRb, const vpTranslationVector &atb, const vpPlane &bP); diff --git a/modules/vision/include/visp3/vision/vpPoseFeatures.h b/modules/vision/include/visp3/vision/vpPoseFeatures.h index 21f7ff7691..2f2ca927b4 100644 --- a/modules/vision/include/visp3/vision/vpPoseFeatures.h +++ b/modules/vision/include/visp3/vision/vpPoseFeatures.h @@ -305,20 +305,20 @@ class vpPoseSpecificFeatureTemplate : public vpPoseSpecificFeature m_tuple = new std::tuple(args...); } - virtual ~vpPoseSpecificFeatureTemplate() { delete m_tuple; } + virtual ~vpPoseSpecificFeatureTemplate() override { delete m_tuple; } - virtual void createDesired() { buildDesiredFeatureWithTuple(m_desiredFeature, func_ptr, *m_tuple); } + virtual void createDesired() override { buildDesiredFeatureWithTuple(m_desiredFeature, func_ptr, *m_tuple); } - virtual vpColVector error() + virtual vpColVector error() override { // std::cout << "Getting S... 
: " << std::get<0>(*tuple).get_s() << // std::endl; return m_currentFeature.error(m_desiredFeature); } - virtual vpMatrix currentInteraction() { return m_currentFeature.interaction(); } + virtual vpMatrix currentInteraction() override { return m_currentFeature.interaction(); } - virtual void createCurrent(const vpHomogeneousMatrix &cMo) + virtual void createCurrent(const vpHomogeneousMatrix &cMo) override { buildCurrentFeatureWithTuple(m_currentFeature, cMo, func_ptr, *m_tuple); } @@ -353,15 +353,15 @@ class vpPoseSpecificFeatureTemplateObject : public vpPoseSpecificFeature m_obj = o; } - virtual ~vpPoseSpecificFeatureTemplateObject() { delete m_tuple; } + virtual ~vpPoseSpecificFeatureTemplateObject() override { delete m_tuple; } - virtual void createDesired() { buildDesiredFeatureObjectWithTuple(m_obj, m_desiredFeature, func_ptr, *m_tuple); } + virtual void createDesired() override { buildDesiredFeatureObjectWithTuple(m_obj, m_desiredFeature, func_ptr, *m_tuple); } - virtual vpColVector error() { return m_currentFeature.error(m_desiredFeature); } + virtual vpColVector error() override { return m_currentFeature.error(m_desiredFeature); } - virtual vpMatrix currentInteraction() { return m_currentFeature.interaction(); } + virtual vpMatrix currentInteraction() override { return m_currentFeature.interaction(); } - virtual void createCurrent(const vpHomogeneousMatrix &cMo) + virtual void createCurrent(const vpHomogeneousMatrix &cMo) override { buildCurrentFeatureObjectWithTuple(m_obj, m_currentFeature, cMo, func_ptr, *m_tuple); } diff --git a/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h b/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h index 486199e0ba..7c61681309 100644 --- a/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h +++ b/modules/visual_features/include/visp3/visual_features/vpBasicFeature.h @@ -69,21 +69,20 @@ // #define FEATURE_LINE8 0x80 /*! 
- \class vpBasicFeature - \ingroup group_core_features - \brief class that defines what is a visual feature -*/ + * \class vpBasicFeature + * \ingroup group_core_features + * \brief class that defines what is a visual feature + */ class VISP_EXPORT vpBasicFeature { -public: // Public constantes +public: static const unsigned int FEATURE_LINE[32]; enum { FEATURE_ALL = 0xffff }; /*! - \enum vpBasicFeatureDeallocatorType - Indicates who should deallocate the feature. - - */ + * \enum vpBasicFeatureDeallocatorType + * Indicates who should deallocate the feature. + */ typedef enum { user, vpServo } vpBasicFeatureDeallocatorType; protected: @@ -92,7 +91,7 @@ class VISP_EXPORT vpBasicFeature //! Dimension of the visual feature. unsigned int dim_s; // int featureLine[8] ; - //! Ensure that all the parameters needed to compute the iteraction matrix + //! Ensure that all the parameters needed to compute the interaction matrix //! are set. bool *flags; //! Number of parameters needed to compute the interaction matrix. diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureDepth.h b/modules/visual_features/include/visp3/visual_features/vpFeatureDepth.h index d8fd609aff..7b4e1ae37f 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureDepth.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureDepth.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D point visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeatureDepth_H #define vpFeatureDepth_H /*! 
- \file vpFeatureDepth.h - \brief Class that defines 3D point visual feature -*/ + * \file vpFeatureDepth.h + * \brief Class that defines 3D point visual feature + */ #include #include @@ -48,112 +46,111 @@ #include /*! - \class vpFeatureDepth - \ingroup group_visual_features - - \brief Class that defines a 3D point visual feature \f$ s\f$ which - is composed by one parameters that is \f$ log( \frac{Z}{Z^*}) \f$ - that defines the current depth relative to the desired depth. Here - \f$ Z \f$ represents the current depth and \f$ Z^* \f$ the desired - depth. - - In this class \f$ x \f$ and \f$ y \f$ are the 2D coordinates in the - camera frame and are given in meter. \f$ x \f$, \f$ y \f$ and \f$ Z - \f$ are needed during the computation of the interaction matrix \f$ - L \f$. - - The visual features can be set easily thanks to the buildFrom() method. - - As the visual feature \f$ s \f$ represents the current depth - relative to the desired depth, the desired visual feature \f$ s^* - \f$ is set to zero. Once the value of the visual feature is set, the - interaction() method allows to compute the interaction matrix \f$ L - \f$ associated to the visual feature, while the error() method - computes the error vector \f$(s - s^*)\f$ between the current visual - feature and the desired one which is here set to zero. - - The code below shows how to create a eye-in hand visual servoing - task using a 3D depth feature \f$ log( \frac{Z}{Z^*}) \f$ that - corresponds to the current depth relative to the desired depth. To - control six degrees of freedom, at least five other features must be - considered. First we create a current (\f$s\f$) 3D depth - feature. Then we set the task to use the interaction matrix - associated to the current feature \f$L_s\f$. And finally we compute - the camera velocity \f$v=-\lambda \; L_s^+ \; (s-s^*)\f$. The - current feature \f$s\f$ is updated in the while() loop. 
- - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpFeatureDepth s; //The current point feature. - //Set the current parameters x, y, Z and the desired depth Zs - double x; //You have to compute the value of x. - double y; //You have to compute the value of y. - double Z; //You have to compute the value of Z. - double Zs; //You have to define the desired depth Zs. - //Set the point feature thanks to the current parameters. - s.buildfrom(x, y, Z, log(Z/Zs)); - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the desired visual features sd - task.setInteractionMatrixType(vpServo::CURRENT); - - // Add the 3D depth feature to the task - task.addFeature(s); // s* is here considered as zero - - // Control loop - for ( ; ; ) { - // The new parameters x, y and Z must be computed here. - - // Update the current point visual feature - s.buildfrom(x, y, Z, log(Z/Zs)); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - - If you want to build your own control law, this other example shows how - to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual - feature, compute the corresponding error vector \f$(s-s^*)\f$ and finally - build the interaction matrix \f$L_s\f$. - - \code -#include -#include -#include - -int main() -{ - vpFeatureDepth s; //The current point feature. - //Set the current parameters x, y, Z and the desired depth Zs - double x; //You have to compute the value of x. - double y; //You have to compute the value of y. - double Z; //You have to compute the value of Z. - double Zs; //You have to define the desired depth Zs. - //Set the point feature thanks to the current parameters. 
- s.buildfrom(x, y, Z, log(Z/Zs)); - - // Compute the interaction matrix L_s for the current point feature - vpMatrix L = s.interaction(); - - // Compute the error vector (s-s*) for the point feature with s* considered as 0. - vpColVector s_star(1); // The dimension is 1. - s_star(1) = 0; // The value of s* is 0. - s.error(s_star); -} - \endcode -*/ - + * \class vpFeatureDepth + * \ingroup group_visual_features + * + * \brief Class that defines a 3D point visual feature \f$ s\f$ which + * is composed by one parameters that is \f$ log( \frac{Z}{Z^*}) \f$ + * that defines the current depth relative to the desired depth. Here + * \f$ Z \f$ represents the current depth and \f$ Z^* \f$ the desired + * depth. + * + * In this class \f$ x \f$ and \f$ y \f$ are the 2D coordinates in the + * camera frame and are given in meter. \f$ x \f$, \f$ y \f$ and \f$ Z + * \f$ are needed during the computation of the interaction matrix \f$ + * L \f$. + * + * The visual features can be set easily thanks to the buildFrom() method. + * + * As the visual feature \f$ s \f$ represents the current depth + * relative to the desired depth, the desired visual feature \f$ s^* + * \f$ is set to zero. Once the value of the visual feature is set, the + * interaction() method allows to compute the interaction matrix \f$ L + * \f$ associated to the visual feature, while the error() method + * computes the error vector \f$(s - s^*)\f$ between the current visual + * feature and the desired one which is here set to zero. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 3D depth feature \f$ log( \frac{Z}{Z^*}) \f$ that + * corresponds to the current depth relative to the desired depth. To + * control six degrees of freedom, at least five other features must be + * considered. First we create a current (\f$s\f$) 3D depth + * feature. Then we set the task to use the interaction matrix + * associated to the current feature \f$L_s\f$. 
And finally we compute + * the camera velocity \f$v=-\lambda \; L_s^+ \; (s-s^*)\f$. The + * current feature \f$s\f$ is updated in the while() loop. + * + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpFeatureDepth s; //The current point feature. + * //Set the current parameters x, y, Z and the desired depth Zs + * double x; // You have to compute the value of x. + * double y; // You have to compute the value of y. + * double Z; // You have to compute the value of Z. + * double Zs; // You have to define the desired depth Zs. + * //Set the point feature thanks to the current parameters. + * s.buildfrom(x, y, Z, log(Z/Zs)); + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the desired visual features sd + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * // Add the 3D depth feature to the task + * task.addFeature(s); // s* is here considered as zero + * + * // Control loop + * for ( ; ; ) { + * // The new parameters x, y and Z must be computed here. + * + * // Update the current point visual feature + * s.buildfrom(x, y, Z, log(Z/Zs)); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * If you want to build your own control law, this other example shows how + * to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual + * feature, compute the corresponding error vector \f$(s-s^*)\f$ and finally + * build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpFeatureDepth s; //The current point feature. + * //Set the current parameters x, y, Z and the desired depth Zs + * double x; // You have to compute the value of x. + * double y; // You have to compute the value of y. 
+ * double Z; // You have to compute the value of Z. + * double Zs; // You have to define the desired depth Zs. + * //Set the point feature thanks to the current parameters. + * s.buildfrom(x, y, Z, log(Z/Zs)); + * + * // Compute the interaction matrix L_s for the current point feature + * vpMatrix L = s.interaction(); + * + * // Compute the error vector (s-s*) for the point feature with s* considered as 0. + * vpColVector s_star(1); // The dimension is 1. + * s_star(1) = 0; // The value of s* is 0. + * s.error(s_star); + * } + * \endcode + */ class VISP_EXPORT vpFeatureDepth : public vpBasicFeature { @@ -170,8 +167,6 @@ class VISP_EXPORT vpFeatureDepth : public vpBasicFeature public: vpFeatureDepth(); - //! Destructor. - virtual ~vpFeatureDepth() {} /* section Set coordinates @@ -180,11 +175,11 @@ class VISP_EXPORT vpFeatureDepth : public vpBasicFeature void buildFrom(double x, double y, double Z, double LogZoverZstar); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; - vpFeatureDepth *duplicate() const; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + unsigned int thickness = 1) const override; + vpFeatureDepth *duplicate() const override; + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; double get_x() const; @@ -194,10 +189,10 @@ class VISP_EXPORT vpFeatureDepth : public vpBasicFeature double get_LogZoverZstar() const; - void init(); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + void init() override; + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void 
set_x(double x); void set_y(double y); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureEllipse.h b/modules/visual_features/include/visp3/visual_features/vpFeatureEllipse.h index a6c0381932..6dc382d56b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureEllipse.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureEllipse.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D ellipse visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeatureEllipse_H #define vpFeatureEllipse_H /*! - \file vpFeatureEllipse.h - \brief Class that defines 2D ellipse visual feature -*/ + * \file vpFeatureEllipse.h + * \brief Class that defines 2D ellipse visual feature + */ #include #include @@ -48,10 +46,10 @@ #include /*! - \class vpFeatureEllipse - \ingroup group_visual_features - \brief Class that defines 2D ellipse visual feature. -*/ + * \class vpFeatureEllipse + * \ingroup group_visual_features + * \brief Class that defines 2D ellipse visual feature. + */ class VISP_EXPORT vpFeatureEllipse : public vpBasicFeature { /* @@ -66,8 +64,6 @@ class VISP_EXPORT vpFeatureEllipse : public vpBasicFeature public: //! Default constructor. vpFeatureEllipse(); - //! Destructor. - virtual ~vpFeatureEllipse() { } /*! 
\section Set coordinates @@ -80,16 +76,15 @@ class VISP_EXPORT vpFeatureEllipse : public vpBasicFeature void buildFrom(double x, double y, double n20, double n11, double n02, double A, double B, double C); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; //! Feature duplication - vpFeatureEllipse *duplicate() const; + vpFeatureEllipse *duplicate() const override; //! compute the error between two visual features from a subset //! a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); - + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; /*! * Returns the visual feature corresponding to the ellipse centroid coordinate along camera x-axis. @@ -116,12 +111,12 @@ class VISP_EXPORT vpFeatureEllipse : public vpBasicFeature double get_n02() const { return s[4]; } //! Default initialization. - void init(); + void init() override; //! compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - //! print the name of the feature - void print(unsigned int select = FEATURE_ALL) const; + //! 
Print the name of the feature + void print(unsigned int select = FEATURE_ALL) const override; void set_x(double x); void set_y(double y); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureLine.h b/modules/visual_features/include/visp3/visual_features/vpFeatureLine.h index f41777f395..807b732fa2 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureLine.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureLine.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D line visual feature. - * -*****************************************************************************/ + */ #ifndef _vpFeatureLine_h_ #define _vpFeatureLine_h_ /*! - \file vpFeatureLine.h - \brief Class that defines 2D line visual feature -*/ + * \file vpFeatureLine.h + * \brief Class that defines 2D line visual feature + */ #include #include @@ -48,191 +46,185 @@ #include /*! - \class vpFeatureLine - \ingroup group_visual_features - - \brief Class that defines a 2D line visual feature \f$ s\f$ which is - composed by two parameters that are \f$ \rho \f$ and \f$ \theta \f$, - the polar coordinates of a line. - - In this class, the equation of the line in the image plane is given by : - \f[ x \; cos(\theta) + y \; sin(\theta) -\rho = 0 \f] Here - \f$ x \f$ and \f$ y \f$ are the coordinates of a point belonging to - the line and they are given in meter. The following image shows the - meanings of the distance \f$\rho\f$ and the angle \f$\theta\f$. - - \image html vpFeatureLine.gif - \image latex vpFeatureLine.ps width=10cm - - You have to note that the \f$ \theta \f$ angle has its value between - \f$ -\pi \f$ and \f$ \pi \f$ and that the \f$ \rho \f$ distance can - be positive or negative. 
The conventions are illustrated by the - image above. - - The visual features can be set easily from an instance of the - classes vpLine, vpMeLine or vpCylinder. For more precision see the - class vpFeatureBuilder. - - Once the values of the visual features are set, the interaction() - method allows to compute the interaction matrix \f$ L \f$ associated - to the visual feature, while the error() method computes the error - vector \f$(s - s^*)\f$ between the current visual feature and the - desired one. - - The code below shows how to create a eye-in hand visual servoing - task using a 2D line feature \f$(\rho,\theta)\f$ that correspond to - the 2D equation of a line in the image plan. To control six - degrees of freedom, at least four other features must be considered - like two other line features for example. First we create a current - (\f$s\f$) 2D line feature. Then we set the task to use the - interaction matrix associated to the current feature \f$L_s\f$. And - finally we compute the camera velocity \f$v=-\lambda \; L_s^+ \; - (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() - loop. - - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpFeatureLine sd; //The desired line feature. - // Sets the desired features rho and theta - double rhod = 0; - double thetad = 0; - // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. - // The line described by the features belongs to this plan. - // Normally two plans are needed to describe a line. But to compute the interaction matrix only - // one equation of the two plans is needed. - // Notes that the Dd value must not be equal to zero ! - double Ad = 0; - double Bd = 0; - double Cd = 1; - double Dd = -1; - // Set the line feature thanks to the desired parameters. - sd.buildfrom(rhod, thetad, Ad,Bd, Cd, Dd); - - vpFeatureLine s; //The current line feature. 
- // Sets the current features rho and theta - double rho; // You have to compute the value of rho. - double theta; // You have to compute the value of theta. - // Set the line feature thanks to the current parameters. - s.buildfrom(rho, theta); - // In this case the parameters A, B, C, D are not needed because the interaction matrix is computed - // with the desired visual feature. - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the desired visual features sd - task.setInteractionMatrixType(vpServo::DESIRED); - - // Add the 2D line feature to the task - task.addFeature(s, sd); - - // Control loop - for ( ; ; ) { - // The new parameters rho and theta must be computed here. - - // Update the current line visual feature - s.buildfrom(rho, theta); - - // Compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - - If you want to build your own control law, this other example shows how to -create a current (\f$s\f$) and desired (\f$s^*\f$) 2D line visual feature, -compute the corresponding error vector \f$(s-s^*)\f$ and finally build the -interaction matrix \f$L_s\f$. - - \code -#include -#include - -int main() -{ - vpFeatureLine sd; //The desired line feature. - // Sets the desired features rho and theta - double rhod = 0; - double thetad = 0; - // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. - double Ad = 0; double Bd = 0; double Cd = 1; double Dd = -1; - // Set the line feature thanks to the desired parameters. - sd.buildfrom(rhod, thetad, Ad,Bd, Cd, Dd); - - vpFeatureLine s; // The current line feature. - // Sets the current features rho and theta - double rho; // You have to compute the value of rho. - double theta; // You have to compute the value of theta. 
- // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. - double A; // You have to compute the value of A. - double B; // You have to compute the value of B. - double C; // You have to compute the value of C. - double D; // You have to compute the value of D. D must not be equal to zero ! - // Set the line feature thanks to the current parameters. - s.buildfrom(rho, theta, A, B, C, D); - - // Compute the interaction matrix L_s for the current line feature - vpMatrix L = s.interaction(); - // You can also compute the interaction matrix L_s for the desired line feature - // The corresponding line of code is : vpMatrix L = sd.interaction(); - - // Compute the error vector (s-sd) for the line feature - s.error(s_star); -} - \endcode - -*/ + * \class vpFeatureLine + * \ingroup group_visual_features + * + * \brief Class that defines a 2D line visual feature \f$ s\f$ which is + * composed by two parameters that are \f$ \rho \f$ and \f$ \theta \f$, + * the polar coordinates of a line. + * + * In this class, the equation of the line in the image plane is given by : + * \f[ x \; cos(\theta) + y \; sin(\theta) -\rho = 0 \f] Here + * \f$ x \f$ and \f$ y \f$ are the coordinates of a point belonging to + * the line and they are given in meter. The following image shows the + * meanings of the distance \f$\rho\f$ and the angle \f$\theta\f$. + * + * \image html vpFeatureLine.gif + * \image latex vpFeatureLine.ps width=10cm + * + * You have to note that the \f$ \theta \f$ angle has its value between + * \f$ -\pi \f$ and \f$ \pi \f$ and that the \f$ \rho \f$ distance can + * be positive or negative. The conventions are illustrated by the + * image above. + * + * The visual features can be set easily from an instance of the + * classes vpLine, vpMeLine or vpCylinder. For more precision see the + * class vpFeatureBuilder. 
+ * + * Once the values of the visual features are set, the interaction() + * method allows to compute the interaction matrix \f$ L \f$ associated + * to the visual feature, while the error() method computes the error + * vector \f$(s - s^*)\f$ between the current visual feature and the + * desired one. + * + * The code below shows how to create an eye-in-hand visual servoing + * task using a 2D line feature \f$(\rho,\theta)\f$ that corresponds to + * the 2D equation of a line in the image plane. To control six + * degrees of freedom, at least four other features must be considered + * like two other line features for example. First we create a current + * (\f$s\f$) 2D line feature. Then we set the task to use the + * interaction matrix associated to the current feature \f$L_s\f$. And + * finally we compute the camera velocity \f$v=-\lambda \; L_s^+ \; + * (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() + * loop. + * + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpFeatureLine sd; //The desired line feature. + * // Sets the desired features rho and theta + * double rhod = 0; + * double thetad = 0; + * // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. + * // The line described by the features belongs to this plane. + * // Normally two planes are needed to describe a line. But to compute the interaction matrix only + * // one equation of the two planes is needed. + * // Note that the Dd value must not be equal to zero ! + * double Ad = 0; + * double Bd = 0; + * double Cd = 1; + * double Dd = -1; + * // Set the line feature thanks to the desired parameters. + * sd.buildfrom(rhod, thetad, Ad,Bd, Cd, Dd); + * + * vpFeatureLine s; //The current line feature. + * // Sets the current features rho and theta + * double rho; // You have to compute the value of rho. + * double theta; // You have to compute the value of theta. 
+ * // Set the line feature thanks to the current parameters. + * s.buildfrom(rho, theta); + * // In this case the parameters A, B, C, D are not needed because the interaction matrix is computed + * // with the desired visual feature. + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the desired visual features sd + * task.setInteractionMatrixType(vpServo::DESIRED); + * + * // Add the 2D line feature to the task + * task.addFeature(s, sd); + * + * // Control loop + * for ( ; ; ) { + * // The new parameters rho and theta must be computed here. + * + * // Update the current line visual feature + * s.buildfrom(rho, theta); + * + * // Compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * If you want to build your own control law, this other example shows how to + * create a current (\f$s\f$) and desired (\f$s^*\f$) 2D line visual feature, + * compute the corresponding error vector \f$(s-s^*)\f$ and finally build the + * interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * + * int main() + * { + * vpFeatureLine sd; //The desired line feature. + * // Sets the desired features rho and theta + * double rhod = 0; + * double thetad = 0; + * // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. + * double Ad = 0; double Bd = 0; double Cd = 1; double Dd = -1; + * // Set the line feature thanks to the desired parameters. + * sd.buildfrom(rhod, thetad, Ad,Bd, Cd, Dd); + * + * vpFeatureLine s; // The current line feature. + * // Sets the current features rho and theta + * double rho; // You have to compute the value of rho. + * double theta; // You have to compute the value of theta. + * // Sets the parameters which describe the equation of a plane in the camera frame : AX+BY+CZ+D=0. 
+ * double A; // You have to compute the value of A. + * double B; // You have to compute the value of B. + * double C; // You have to compute the value of C. + * double D; // You have to compute the value of D. D must not be equal to zero ! + * // Set the line feature thanks to the current parameters. + * s.buildfrom(rho, theta, A, B, C, D); + * + * // Compute the interaction matrix L_s for the current line feature + * vpMatrix L = s.interaction(); + * // You can also compute the interaction matrix L_s for the desired line feature + * // The corresponding line of code is : vpMatrix L = sd.interaction(); + * + * // Compute the error vector (s-sd) for the line feature + * s.error(s_star); + * } + * \endcode + */ class VISP_EXPORT vpFeatureLine : public vpBasicFeature { /*! - attributes and members directly related to the vpBasicFeature needs - other functionalities ar useful but not mandatory - */ + * Attributes and members directly related to the vpBasicFeature needs + * other functionalities are useful but not mandatory + */ private: //! FeatureLine depth (required to compute the interaction matrix) - //! equation of a plane + //! equation of a plane double A, B, C, D; public: vpFeatureLine(); - //! Destructor. 
- virtual ~vpFeatureLine() { } - // void buildFrom(const vpLine &l) ; - // void buildFrom(const vpCylinder &c, int l) ; void buildFrom(double rho, double theta); void buildFrom(double rho, double theta, double A, double B, double C, double D); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; - vpFeatureLine *duplicate() const; + unsigned int thickness = 1) const override; + vpFeatureLine *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); - // vpColVector error(const int select = FEATURE_ALL) ; + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; /*! - Return the \f$ \rho \f$ subset value of the visual feature \f$ s \f$. - */ + * Return the \f$ \rho \f$ subset value of the visual feature \f$ s \f$. + */ double getRho() const { return s[0]; } /*! - Return the \f$ \theta \f$ subset value of the visual feature \f$ s \f$. - */ + * Return the \f$ \theta \f$ subset value of the visual feature \f$ s \f$. 
+ */ double getTheta() const { return s[1]; } - void init(); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + void init() override; + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void setRhoTheta(double rho, double theta); void setABCD(double A, double B, double C, double D); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureLuminance.h b/modules/visual_features/include/visp3/visual_features/vpFeatureLuminance.h index 0067c88104..5fc3e56f5b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureLuminance.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureLuminance.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Luminance based feature. - * -*****************************************************************************/ + */ #ifndef vpFeatureLuminance_h #define vpFeatureLuminance_h @@ -41,20 +39,20 @@ #include /*! - \file vpFeatureLuminance.h - \brief Class that defines the image luminance visual feature - - For more details see \cite Collewet08c. -*/ + * \file vpFeatureLuminance.h + * \brief Class that defines the image luminance visual feature + * + * For more details see \cite Collewet08c. + */ #ifndef DOXYGEN_SHOULD_SKIP_THIS /*! - \class vpLuminance - \brief Class that defines the luminance and gradient of a point - - \sa vpFeatureLuminance -*/ + * \class vpLuminance + * \brief Class that defines the luminance and gradient of a point. + * + * \sa vpFeatureLuminance + */ class VISP_EXPORT vpLuminance { public: @@ -66,13 +64,12 @@ class VISP_EXPORT vpLuminance #endif /*! 
- \class vpFeatureLuminance - \ingroup group_visual_features - \brief Class that defines the image luminance visual feature - - For more details see \cite Collewet08c. -*/ - + * \class vpFeatureLuminance + * \ingroup group_visual_features + * \brief Class that defines the image luminance visual feature + * + * For more details see \cite Collewet08c. + */ class VISP_EXPORT vpFeatureLuminance : public vpBasicFeature { protected: @@ -95,30 +92,30 @@ class VISP_EXPORT vpFeatureLuminance : public vpBasicFeature vpFeatureLuminance(); vpFeatureLuminance(const vpFeatureLuminance &f); //! Destructor. - virtual ~vpFeatureLuminance(); + virtual ~vpFeatureLuminance() override; void buildFrom(vpImage &I); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; - vpFeatureLuminance *duplicate() const; + vpFeatureLuminance *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; void error(const vpBasicFeature &s_star, vpColVector &e); double get_Z() const; - void init(); + void init() override; void init(unsigned int _nbr, unsigned int _nbc, double _Z); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; void interaction(vpMatrix &L); vpFeatureLuminance &operator=(const vpFeatureLuminance &f); - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void setCameraParameters(vpCameraParameters &_cam); void set_Z(double Z); diff --git 
a/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h index 9bfe4344da..2517506534 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMoment.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,18 +29,15 @@ * * Description: * Base for all moment features - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ -/*! -\file vpFeatureMoment.h -\brief Base class for moment features. + */ -Handles common system operations like selection, duplication. Functionality is -computed in derived classes. -*/ +/*! + * \file vpFeatureMoment.h + * \brief Base class for moment features. + * + * Handles common system operations like selection, duplication. Functionality is + * computed in derived classes. + */ #ifndef _vpFeatureMoment_h_ #define _vpFeatureMoment_h_ @@ -56,106 +52,106 @@ class vpFeatureMomentDatabase; class vpMoment; /*! -\class vpFeatureMoment - -\ingroup group_visual_features - -\brief This class defines shared system methods/attributes for 2D moment -features but no functional code. It is used to compute interaction matrices -for moment features. - -While vpMoment-type classes do only compute moment values and can by used for -almost anything, vpFeatureMoment-type classes are specifically designed for -visual servoing. More importantly, a vpFeatureMoment is used to compute the -interaction matrix associated to it's moment primitive. - -This class is virtual and cannot be used directly. 
It defines the following -characteristics common to all moment features: -- Plane orientation parameters (A,B,C): -Each camera frame corresponds to a physical planar object contained in a -plane. This plane's equation has the following form: \f$ A \times x+B \times y -+ C = \frac{1}{Z} \f$. These parameters can be updated anytime. -- Get corresponding moment primitive: for example a vpFeatureMomentCInvariant -will provide access to a vpMomentCInvariant instance. -- Provide access to a feature database (vpFeatureMomentDatabase). -- All interaction matrices (different from vpBasicFeature::interaction which -selects the required interaction matrix). - -Like vpMoment, vpFeatureMoment provides a vpFeatureMoment::update() method. -But unlike vpMoment::update() which only acknowledges the new object, the -vpFeatureMoment::update() acknowledges the new plane parameters AND computes -the interaction matrices associated with the feature. - -A vpFeatureMoment will be often part of a vpFeatureMomentDatabase in the same -way a vpMoment is part of a vpMomentDatabase. This database is specified -inside the vpFeatureMoment::vpFeatureMoment() constructor. As a result, a -vpFeatureMoment will be able to access other vpFeatureMoments through this -database. - -A vpBasicFeature can be duplicated into a vpMomentGenericFeature. In that -case, all data in the vpBasicFeature is copied but the feature's name is lost. -For example if a vpFeatureMomentCInvariant is duplicated, the duplicata will -be operational but could not be used in a vpFeatureMomentDatabase. - -Note that you can use vpFeatureMoment to do visual servoing but it is not it's -only purpose. You may compute your interaction matrices with -vpFeatureMoment::update() and use them for any purpose. - -\attention - A vpFeatureMoment is not responsible for updating the moment -primitives it depends on. Make sure your vpMoments are all up to date before -computing an interaction matrix using vpFeatureMoment. 
- -\attention - Be careful with orders. Often, computing a feature of order n -requires vpMoment primitives of order n+1. Make sure to check the -documentation of the specialised vpFeatureMoment classes when deciding to -which order you want to initialize the object. An object of order 6 should be -sufficient for all classic implementations of vpFeatureMoment. - -Here is an example of how to use a vpFeatureMoment (in this case -vpFeatureMomentBasic). -\code -#include -#include -#include -#include -#include -#include - -int main() -{ - vpPoint p; - std::vector vec_p; // vector that contains the vertices - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - // Init object of order 3 because we need vpFeatureMomentBasic of order 2 which - // implies third-order moment primitives - vpMomentObject obj(3); - obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object - obj.fromVector(vec_p); - - vpMomentDatabase mdb; //database for moment primitives. This will - //only contain the basic moment. - vpMomentBasic bm; //basic moment (this particular moment is nothing - //more than a shortcut to the vpMomentObject) - bm.linkTo(mdb); //add basic moment to moment database - - vpFeatureMomentBasic fmb(mdb,0,0,1,NULL); - - //update and compute the vpMoment BEFORE doing any operations with vpFeatureMoment - bm.update(obj); - bm.compute(); - - fmb.update(0,0,1); //update the vpFeatureMoment with a plane - //configuration - std::cout << fmb.interaction(1,1) << std::endl; -} -\endcode -*/ + * \class vpFeatureMoment + * + * \ingroup group_visual_features + * + * \brief This class defines shared system methods/attributes for 2D moment + * features but no functional code. It is used to compute interaction matrices + * for moment features. 
+ * + * While vpMoment-type classes do only compute moment values and can be used for + * almost anything, vpFeatureMoment-type classes are specifically designed for + * visual servoing. More importantly, a vpFeatureMoment is used to compute the + * interaction matrix associated to its moment primitive. + * + * This class is virtual and cannot be used directly. It defines the following + * characteristics common to all moment features: + * - Plane orientation parameters (A,B,C): + * Each camera frame corresponds to a physical planar object contained in a + * plane. This plane's equation has the following form: \f$ A \times x+B \times y + * + C = \frac{1}{Z} \f$. These parameters can be updated anytime. + * - Get corresponding moment primitive: for example a vpFeatureMomentCInvariant + * will provide access to a vpMomentCInvariant instance. + * - Provide access to a feature database (vpFeatureMomentDatabase). + * - All interaction matrices (different from vpBasicFeature::interaction which + * selects the required interaction matrix). + * + * Like vpMoment, vpFeatureMoment provides a vpFeatureMoment::update() method. + * But unlike vpMoment::update() which only acknowledges the new object, the + * vpFeatureMoment::update() acknowledges the new plane parameters AND computes + * the interaction matrices associated with the feature. + * + * A vpFeatureMoment will be often part of a vpFeatureMomentDatabase in the same + * way a vpMoment is part of a vpMomentDatabase. This database is specified + * inside the vpFeatureMoment::vpFeatureMoment() constructor. As a result, a + * vpFeatureMoment will be able to access other vpFeatureMoments through this + * database. + * + * A vpBasicFeature can be duplicated into a vpMomentGenericFeature. In that + * case, all data in the vpBasicFeature is copied but the feature's name is lost. 
+ * For example if a vpFeatureMomentCInvariant is duplicated, the duplicate will + * be operational but could not be used in a vpFeatureMomentDatabase. + * + * Note that you can use vpFeatureMoment to do visual servoing but it is not its + * only purpose. You may compute your interaction matrices with + * vpFeatureMoment::update() and use them for any purpose. + * + * \attention - A vpFeatureMoment is not responsible for updating the moment + * primitives it depends on. Make sure your vpMoments are all up to date before + * computing an interaction matrix using vpFeatureMoment. + * + * \attention - Be careful with orders. Often, computing a feature of order n + * requires vpMoment primitives of order n+1. Make sure to check the + * documentation of the specialized vpFeatureMoment classes when deciding to + * which order you want to initialize the object. An object of order 6 should be + * sufficient for all classic implementations of vpFeatureMoment. + * + * Here is an example of how to use a vpFeatureMoment (in this case + * vpFeatureMomentBasic). + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpPoint p; + * std::vector vec_p; // vector that contains the vertices + * + * p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * // Init object of order 3 because we need vpFeatureMomentBasic of order 2 which + * // implies third-order moment primitives + * vpMomentObject obj(3); + * obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object + * obj.fromVector(vec_p); + * + * vpMomentDatabase mdb; //database for moment primitives. This will + * //only contain the basic moment. 
+ * vpMomentBasic bm; //basic moment (this particular moment is nothing + * //more than a shortcut to the vpMomentObject) + * bm.linkTo(mdb); //add basic moment to moment database + * + * vpFeatureMomentBasic fmb(mdb,0,0,1,NULL); + * + * //update and compute the vpMoment BEFORE doing any operations with vpFeatureMoment + * bm.update(obj); + * bm.compute(); + * + * fmb.update(0,0,1); //update the vpFeatureMoment with a plane + * //configuration + * std::cout << fmb.interaction(1,1) << std::endl; + * } + * \endcode + */ class VISP_EXPORT vpFeatureMoment : public vpBasicFeature { protected: @@ -188,52 +184,49 @@ class VISP_EXPORT vpFeatureMoment : public vpBasicFeature public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane, feature database and matrix size. \param - data_base : Moment database. The database of moment primitives (first - parameter) is mandatory. It is used to access different moment values later - used to compute the final matrix. \param A_ : Plane coefficient in a \f$ A - \times x+B \times y + C = \frac{1}{Z} \f$ plane. \param B_ : Plane - coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = - \frac{1}{Z} \f$ plane. \param featureMoments : Feature database \param - nbmatrices : If you want to create a new vpFeatureMoment implementation, - your feature will often have a matrix size of n lines. You can specify the - number of lines by this parameter. - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane, feature database and matrix size. + * \param data_base : Moment database. The database of moment primitives (first + * parameter) is mandatory. It is used to access different moment values later + * used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. 
+ * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database + * \param nbmatrices : If you want to create a new vpFeatureMoment implementation, + * your feature will often have a matrix size of n lines. You can specify the + * number of lines by this parameter. + */ vpFeatureMoment(vpMomentDatabase &data_base, double A_ = 0.0, double B_ = 0.0, double C_ = 0.0, vpFeatureMomentDatabase *featureMoments = NULL, unsigned int nbmatrices = 1) : vpBasicFeature(), moment(NULL), moments(data_base), featureMomentsDataBase(featureMoments), - interaction_matrices(nbmatrices), A(A_), B(B_), C(C_), _name() - { - } - - virtual ~vpFeatureMoment(); + interaction_matrices(nbmatrices), A(A_), B(B_), C(C_), _name() + { } /** @name Inherited functionalities from vpFeatureMoment */ //@{ virtual void compute_interaction(void); - vpBasicFeature *duplicate() const; + vpBasicFeature *duplicate() const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; int getDimension(unsigned int select = FEATURE_ALL) const; - void init(void); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + void init(void) override; + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; void linkTo(vpFeatureMomentDatabase &featureMoments); /*! - Name of the moment corresponding to the feature. This allows to locate - the moment associated with the feature in the provided database. - */ + * Name of the moment corresponding to the feature. 
This allows to locate + * the moment associated with the feature in the provided database. + */ virtual const char *momentName() const = 0; /*! - Name of the feature used to locate it in the database of features. - */ + * Name of the feature used to locate it in the database of features. + */ virtual const char *name() const = 0; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; virtual void printDependencies(std::ostream &os) const; void update(double A, double B, double C); @@ -243,23 +236,22 @@ class VISP_EXPORT vpFeatureMoment : public vpBasicFeature }; /*! -\class vpMomentGenericFeature - -\ingroup group_visual_features - -\brief This class defines a generic feature used for moment feature -duplication. - -A vpBasicFeature can be duplicated into a vpMomentGenericFeature. In that -case, all data in the vpBasicFeature is copied but the feature's name is lost. -For example if a vpFeatureMomentCInvariant is duplicated, the duplicata will -be operational but could not be used in a vpFeatureMomentDatabase. The reason -for this is that a vpMomentGenericFeature can refer to anything therefore it -has no specific name. - -Duplication is mostly used internally in ViSP. - -*/ + * \class vpMomentGenericFeature + * + * \ingroup group_visual_features + * + * \brief This class defines a generic feature used for moment feature + * duplication. + * + * A vpBasicFeature can be duplicated into a vpMomentGenericFeature. In that + * case, all data in the vpBasicFeature is copied but the feature's name is lost. + * For example if a vpFeatureMomentCInvariant is duplicated, the duplicate will + * be operational but could not be used in a vpFeatureMomentDatabase. The reason + * for this is that a vpMomentGenericFeature can refer to anything therefore it + * has no specific name. + * + * Duplication is mostly used internally in ViSP. 
+ */ class VISP_EXPORT vpMomentGenericFeature : public vpFeatureMoment { public: @@ -269,13 +261,15 @@ class VISP_EXPORT vpMomentGenericFeature : public vpFeatureMoment { this->moment = p_moment; } + /*! - No specific moment name. - */ + * No specific moment name. + */ const char *momentName() const { return NULL; } + /*! - No specific feature name. - */ + * No specific feature name. + */ virtual const char *name() const { return NULL; } }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h index 1ff7eb45e3..f8214c084d 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAlpha.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentAlpha.h - \brief Implementation of the interaction matrix computation for - vpMomentAlpha. -*/ + * \file vpFeatureMomentAlpha.h + * \brief Implementation of the interaction matrix computation for + * vpMomentAlpha. + */ #ifndef _vpFeatureMomentAlpha_h_ #define _vpFeatureMomentAlpha_h_ @@ -49,88 +45,87 @@ class vpMomentDatabase; /*! - \class vpFeatureMomentAlpha - - \ingroup group_visual_features - - \brief Functionality computation for in-plane rotation moment feature \f$ \alpha \f$: - computes the interaction matrix associated with vpMomentAlpha. - - The interaction matrix for the feature can be deduced from \cite Tahri05z. 
- - This class computes the interaction matrix associated to \f$ \alpha = - \frac{1}{2} arctan(\frac{2\mu_{11}}{\mu_{20}-\mu_{02}}) \f$ moment primitive. - - The interaction matrix for the feature has the following form: - \f[{ - \left[ \begin {array}{c} {\frac {\mu_{{1,1}}{\it DA}\,A}{d}}+{\frac { - \left( {\it DA}\,\mu_{{0,2}}+1/2\,d-1/2\,{{\it DA}}^{2} \right) B}{d} -}\\ \noalign{\medskip}{\frac { \left( {\it DA}\,\mu_{{0,2}}-1/2\,d-1/2 -\,{{\it DA}}^{2} \right) A}{d}}-{\frac {B\mu_{{1,1}}{\it DA}}{d}} -\\ \noalign{\medskip}Bw_{{x}}-Aw_{{y}}\\ \noalign{\medskip}{\frac { -\beta\, \left( \mu_{{1,2}} \left( \mu_{{2,0}}-\mu_{{0,2}} \right) +\mu -_{{1,1}} \left( \mu_{{0,3}}-\mu_{{2,1}} \right) \right) +\gamma\,x_{{ -g}} \left( \mu_{{0,2}} \left( \mu_{{2,0}}-\mu_{{0,2}} \right) -2\,{\mu -_{{1,1}}}^{2} \right) +\gamma\,y_{{g}}\mu_{{1,1}} \left( \mu_{{2,0}}+ -\mu_{{0,2}} \right) }{d}}\\ \noalign{\medskip}{\frac {\beta\, \left( -\mu_{{2,1}} \left( \mu_{{0,2}}-\mu_{{2,0}} \right) +\mu_{{1,1}} - \left( \mu_{{3,0}}-\mu_{{1,2}} \right) \right) +\gamma\,x_{{g}}\mu_{ -{1,1}} \left( \mu_{{2,0}}+\mu_{{0,2}} \right) +\gamma\,y_{{g}} \left( -\mu_{{2,0}} \left( \mu_{{0,2}}-\mu_{{2,0}} \right) -2\,{\mu_{{1,1}}}^{ -2} \right) }{d}}\\ \noalign{\medskip}-1\end {array} \right] -}^t -\f] -with \f${\it DA} = \mu_{{2,0}}-\mu_{{0,2}}\f$ and \f${\it d} = -DA^2+4{\mu_{1,1}}^2\f$. - - - In the discrete case: - \f$beta = 4\f$,\f$gamma = 2\f$. - - In the dense case: - \f$beta = 5\f$,\f$gamma = 1\f$. - - - The interaction matrix computed is single-dimension (no selection possible) - and can be obtained by calling vpFeatureMomentAlpha::interaction(). - - This feature is often used in moment-based visual servoing to control the - planar rotation parameter. - - Minimum vpMomentObject order needed to compute this feature: 4. 
- - This feature depends on: - - vpMomentCentered - - vpMomentGravityCenter -*/ + * \class vpFeatureMomentAlpha + * + * \ingroup group_visual_features + * + * \brief Functionality computation for in-plane rotation moment feature \f$ \alpha \f$: + * computes the interaction matrix associated with vpMomentAlpha. + * + * The interaction matrix for the feature can be deduced from \cite Tahri05z. + * + * This class computes the interaction matrix associated to \f$ \alpha = + * \frac{1}{2} arctan(\frac{2\mu_{11}}{\mu_{20}-\mu_{02}}) \f$ moment primitive. + * + * The interaction matrix for the feature has the following form: + * \f[{ + * \left[ \begin {array}{c} {\frac {\mu_{{1,1}}{\it DA}\,A}{d}}+{\frac { + * \left( {\it DA}\,\mu_{{0,2}}+1/2\,d-1/2\,{{\it DA}}^{2} \right) B}{d} + * }\\ \noalign{\medskip}{\frac { \left( {\it DA}\,\mu_{{0,2}}-1/2\,d-1/2 + * \,{{\it DA}}^{2} \right) A}{d}}-{\frac {B\mu_{{1,1}}{\it DA}}{d}} + * \\ \noalign{\medskip}Bw_{{x}}-Aw_{{y}}\\ \noalign{\medskip}{\frac { + * \beta\, \left( \mu_{{1,2}} \left( \mu_{{2,0}}-\mu_{{0,2}} \right) +\mu + * _{{1,1}} \left( \mu_{{0,3}}-\mu_{{2,1}} \right) \right) +\gamma\,x_{{ + * g}} \left( \mu_{{0,2}} \left( \mu_{{2,0}}-\mu_{{0,2}} \right) -2\,{\mu + * _{{1,1}}}^{2} \right) +\gamma\,y_{{g}}\mu_{{1,1}} \left( \mu_{{2,0}}+ + * \mu_{{0,2}} \right) }{d}}\\ \noalign{\medskip}{\frac {\beta\, \left( + * \mu_{{2,1}} \left( \mu_{{0,2}}-\mu_{{2,0}} \right) +\mu_{{1,1}} + * \left( \mu_{{3,0}}-\mu_{{1,2}} \right) \right) +\gamma\,x_{{g}}\mu_{ + * {1,1}} \left( \mu_{{2,0}}+\mu_{{0,2}} \right) +\gamma\,y_{{g}} \left( + * \mu_{{2,0}} \left( \mu_{{0,2}}-\mu_{{2,0}} \right) -2\,{\mu_{{1,1}}}^{ + * 2} \right) }{d}}\\ \noalign{\medskip}-1\end {array} \right] + * }^t + * \f] + * with \f${\it DA} = \mu_{{2,0}}-\mu_{{0,2}}\f$ and \f${\it d} = + * DA^2+4{\mu_{1,1}}^2\f$. + * + * - In the discrete case: + * \f$beta = 4\f$,\f$gamma = 2\f$. + * - In the dense case: + * \f$beta = 5\f$,\f$gamma = 1\f$. 
+ * + * The interaction matrix computed is single-dimension (no selection possible) + * and can be obtained by calling vpFeatureMomentAlpha::interaction(). + * + * This feature is often used in moment-based visual servoing to control the + * planar rotation parameter. + * + * Minimum vpMomentObject order needed to compute this feature: 4. + * + * This feature depends on: + * - vpMomentCentered + * - vpMomentGravityCenter + */ class VISP_EXPORT vpFeatureMomentAlpha : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. 
+ */ vpFeatureMomentAlpha(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) - { - } + { } + + void compute_interaction() override; - void compute_interaction(); /*! - Associated moment name. - */ - const char *momentName() const { return "vpMomentAlpha"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentAlpha"; } + /*! - Feature name. - */ - const char *name() const { return "vpFeatureMomentAlpha"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentAlpha"; } - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; }; #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h index e45ee07240..08d16431f9 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentArea.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Definition of vpFeatureMomentArea associated to vpMomentArea - * - * Authors: - * Manikandan Bakthavatchalam - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentArea.h - \brief Implementation of the interaction matrix computation for - vpMomentArea. -*/ + * \file vpFeatureMomentArea.h + * \brief Implementation of the interaction matrix computation for + * vpMomentArea. 
+ */ #ifndef _vpFeatureMomentArea_h_ #define _vpFeatureMomentArea_h_ @@ -48,43 +44,40 @@ class vpMomentDatabase; /*! - \class vpFeatureMomentArea - - \ingroup group_visual_features - - \brief Surface moment feature. Computes the interaction matrix associated - with vpMomentArea. - -*/ - + * \class vpFeatureMomentArea + * + * \ingroup group_visual_features + * + * \brief Surface moment feature. Computes the interaction matrix associated + * with vpMomentArea. + */ class VISP_EXPORT vpFeatureMomentArea : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. 
+ */ vpFeatureMomentArea(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) - { - } + { } - void compute_interaction(); + void compute_interaction() override; /*! - associated moment name - */ - const char *momentName() const { return "vpMomentArea"; } + * associated moment name + */ + const char *momentName() const override { return "vpMomentArea"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentArea"; } + * feature name + */ + const char *name() const override { return "vpFeatureMomentArea"; } }; #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h index 61c074ba47..4cc159960b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentAreaNormalized.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentAreaNormalized.h - \brief Implementation of the interaction matrix computation for - vpMomentAreaNormalized. -*/ + * \file vpFeatureMomentAreaNormalized.h + * \brief Implementation of the interaction matrix computation for + * vpMomentAreaNormalized. + */ #ifndef _vpFeatureMomentAreaNormalized_h_ #define _vpFeatureMomentAreaNormalized_h_ @@ -49,162 +45,161 @@ class vpMomentDatabase; /*! 
- \class vpFeatureMomentAreaNormalized - - \ingroup group_visual_features - - \brief Functionality computation for normalized surface moment feature. - Computes the interaction matrix associated with vpMomentAreaNormalized. - - The interaction matrix for the moment feature can be deduced from \cite - Tahri05z. - - To do so, one must derive it and obtain a combination of interaction - matrices by using (1). It allows to compute the interaction matrix for \f$ - a_n \f$. - - The interaction matrix computed is single-dimension (no selection possible) - and can be obtained by calling - vpFeatureMomentGravityCenterNormalized::interaction. - - This feature is often used in moment-based visual servoing to control the - depth parameter. - - Minimum vpMomentObject order needed to compute this feature: 1 in dense mode - and 3 in discrete mode. - - This feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentAreaNormalized - - vpFeatureMomentBasic - + * \class vpFeatureMomentAreaNormalized + * + * \ingroup group_visual_features + * + * \brief Functionality computation for normalized surface moment feature. + * Computes the interaction matrix associated with vpMomentAreaNormalized. + * + * The interaction matrix for the moment feature can be deduced from \cite + * Tahri05z. + * + * To do so, one must derive it and obtain a combination of interaction + * matrices by using (1). It allows to compute the interaction matrix for \f$ + * a_n \f$. + * + * The interaction matrix computed is single-dimension (no selection possible) + * and can be obtained by calling + * vpFeatureMomentGravityCenterNormalized::interaction. + * + * This feature is often used in moment-based visual servoing to control the + * depth parameter. + * + * Minimum vpMomentObject order needed to compute this feature: 1 in dense mode + * and 3 in discrete mode. 
+ * + * This feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentAreaNormalized + * - vpFeatureMomentBasic */ class VISP_EXPORT vpFeatureMomentAreaNormalized : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param database : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A : Plane coefficient in a \f$ A \times x+B \times y + C = - \frac{1}{Z} \f$ plane. - \param B : Plane coefficient in a \f$ A \times x+B - \times y + C = \frac{1}{Z} \f$ plane. - \param C : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param database : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A : Plane coefficient in a \f$ A \times x+B \times y + C = + * \frac{1}{Z} \f$ plane. + * \param B : Plane coefficient in a \f$ A \times x+B + * \times y + C = \frac{1}{Z} \f$ plane. + * \param C : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentAreaNormalized(vpMomentDatabase &database, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 1) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! - associated moment name - */ - const char *momentName() const { return "vpMomentAreaNormalized"; } + * Associated moment name. 
+ */ + const char *momentName() const override { return "vpMomentAreaNormalized"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentAreaNormalized"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentAreaNormalized"; } }; #else class vpMomentDatabase; /*! - \class vpFeatureMomentAreaNormalized - - \ingroup group_visual_features - - \brief Functionality computation for normalized surface moment feature. - Computes the interaction matrix associated with vpMomentAreaNormalized. - - The interaction matrix for the moment has the following form: - - In the discrete case: - \f[ - L_{a_n} = - { - \left[ - \begin {array}{c} - a_{{n}}Ae_{{2,0}}+a_{{n}}Be_{{1,1}} \\ - \noalign{\medskip}a_{{n}}Ae_{{1,1}}+a_{{n}}Be_{{0,2}} \\ - \noalign{\medskip}-a_{{n}}C+Bw_{{x}}-Aw_{{y}} \\ - \noalign{\medskip}- \left( e_{{2,0}}+2\,e_{{0,2}} \right) y_{{g}}-e_{{2,1}}-x_{{g}}e_{{1,1}}+\eta_{{1,1}}e_{{1,0}}-e_{{0,3}}+\eta_{{0,2}}e_{{0,1}} \\ - \noalign{\medskip} \left( 2\,e_{{2,0}}+e_{{0,2}} \right) x_{{g}}+e_{{3,0}}+y_{{g}}e_{{1,1}}-\eta_{{2,0}}e_{{1,0}}+e_{{1,2}}-\eta_{{1,1}}e_{{0,1}} \\ - \noalign{\medskip}0 - \end {array} - \right] - }^t - \f] - - In the dense case: - \f[ - L_{a_n} = - { - \left[ - \begin {array}{c} - 1/2\,a_{{n}}A \\ - \noalign{\medskip}1/2\,a_{{n}}B \\ - \noalign{\medskip}-a_{{n}}C-3/2\,Ax_{{n}}-3/2\,By_{{n}} \\ - \noalign{\medskip}-3/2\,y_{{n}} \\ - \noalign{\medskip}3/2\,x_{{n}} \\ - \noalign{\medskip}0 - \end {array} - \right] - }^t - \f] - with: - - \f$e_{i,j}=\frac{\mu_{i,j}}{NA}\f$ - - \f$NA=\mu_{2,0}+\mu_{0,2}\f$ - - \f$\eta\f$ is the centered and normalized moment. - To do so, one must derive it and obtain a combination of interaction - matrices by using (1). It allows to compute the interaction matrix for \f$ a_n \f$. - - The interaction matrix computed is single-dimension (no selection possible) - and can be obtained by calling vpFeatureMomentGravityCenterNormalized::interaction. 
- - This feature is often used in moment-based visual servoing to control the depth parameter. - - Minimum vpMomentObject order needed to compute this feature: 1 in dense mode - and 3 in discrete mode. - - This feature depends on: - - vpMomentCentered - - vpMomentAreaNormalized - - vpMomentGravityCenter - -*/ + * \class vpFeatureMomentAreaNormalized + * + * \ingroup group_visual_features + * + * \brief Functionality computation for normalized surface moment feature. + * Computes the interaction matrix associated with vpMomentAreaNormalized. + * + * The interaction matrix for the moment has the following form: + * - In the discrete case: + * \f[ + * L_{a_n} = + * { + * \left[ + * \begin {array}{c} + * a_{{n}}Ae_{{2,0}}+a_{{n}}Be_{{1,1}} \\ + * \noalign{\medskip}a_{{n}}Ae_{{1,1}}+a_{{n}}Be_{{0,2}} \\ + * \noalign{\medskip}-a_{{n}}C+Bw_{{x}}-Aw_{{y}} \\ + * \noalign{\medskip}- \left( e_{{2,0}}+2\,e_{{0,2}} \right) y_{{g}}-e_{{2,1}}-x_{{g}}e_{{1,1}}+\eta_{{1,1}}e_{{1,0}}-e_{{0,3}}+\eta_{{0,2}}e_{{0,1}} \\ + * \noalign{\medskip} \left( 2\,e_{{2,0}}+e_{{0,2}} \right) x_{{g}}+e_{{3,0}}+y_{{g}}e_{{1,1}}-\eta_{{2,0}}e_{{1,0}}+e_{{1,2}}-\eta_{{1,1}}e_{{0,1}} \\ + * \noalign{\medskip}0 + * \end {array} + * \right] + * }^t + * \f] + * + * - In the dense case: + * \f[ + * L_{a_n} = + * { + * \left[ + * \begin {array}{c} + * 1/2\,a_{{n}}A \\ + * \noalign{\medskip}1/2\,a_{{n}}B \\ + * \noalign{\medskip}-a_{{n}}C-3/2\,Ax_{{n}}-3/2\,By_{{n}} \\ + * \noalign{\medskip}-3/2\,y_{{n}} \\ + * \noalign{\medskip}3/2\,x_{{n}} \\ + * \noalign{\medskip}0 + * \end {array} + * \right] + * }^t + * \f] + * with: + * - \f$e_{i,j}=\frac{\mu_{i,j}}{NA}\f$ + * - \f$NA=\mu_{2,0}+\mu_{0,2}\f$ + * - \f$\eta\f$ is the centered and normalized moment. + * To do so, one must derive it and obtain a combination of interaction + * matrices by using (1). It allows to compute the interaction matrix for \f$ a_n \f$. 
+ * + * The interaction matrix computed is single-dimension (no selection possible) + * and can be obtained by calling vpFeatureMomentGravityCenterNormalized::interaction. + * + * This feature is often used in moment-based visual servoing to control the depth parameter. + * + * Minimum vpMomentObject order needed to compute this feature: 1 in dense mode + * and 3 in discrete mode. + * + * This feature depends on: + * - vpMomentCentered + * - vpMomentAreaNormalized + * - vpMomentGravityCenter + */ class VISP_EXPORT vpFeatureMomentAreaNormalized : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. 
+ */ vpFeatureMomentAreaNormalized(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 1) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! - associated moment name - */ - const char *momentName() const { return "vpMomentAreaNormalized"; } + * Associated moment name + */ + const char *momentName() const override { return "vpMomentAreaNormalized"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentAreaNormalized"; } + * Feature name + */ + const char *name() const override { return "vpFeatureMomentAreaNormalized"; } }; #endif #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h index 4708d1cea9..9a8855bb27 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentBasic.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentBasic.h - \brief Implementation of the interaction matrix computation for - vpMomentBasic. -*/ + * \file vpFeatureMomentBasic.h + * \brief Implementation of the interaction matrix computation for + * vpMomentBasic. + */ #ifndef _vpFeatureMomentBasic_h_ #define _vpFeatureMomentBasic_h_ @@ -50,33 +46,33 @@ class vpMomentDatabase; /*! 
- \class vpFeatureMomentBasic - - \ingroup group_visual_features - - \brief Functionality computation for basic moment feature. Computes the - interaction matrix associated with vpMomentBasic. - - The interaction matrix for the basic moment feature is defined in - \cite Tahri05z, equation (13). This vpFeatureMoment, as well as it's - corresponding moment primitive is double-indexed. The interaction matrix \f$ - L_{m_{ij}} \f$ is obtained by calling vpFeatureMomentBasic::interaction - (i,j) and is associated to \f$ m_{ij} \f$ obtained by vpMomentBasic::get - (i,j). vpFeatureMomentBasic computes interaction matrices all interaction - matrices up to vpMomentObject::getOrder()-1. \attention The maximum order - reached by vpFeatureMomentBasic is NOT the maximum order of the - vpMomentObject, it is one unit smaller. For example if you define your - vpMomentObject up to order n then vpFeatureMomentBasic will be able to - compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with - \f$ i+j<=n-1 \f$. - - You can see an example of vpFeatureMomentBasic by looking at the - documentation of the vpFeatureMoment class. - - This feature depends on: - - vpMomentBasic - -*/ + * \class vpFeatureMomentBasic + * + * \ingroup group_visual_features + * + * \brief Functionality computation for basic moment feature. Computes the + * interaction matrix associated with vpMomentBasic. + * + * The interaction matrix for the basic moment feature is defined in + * \cite Tahri05z, equation (13). This vpFeatureMoment, as well as it's + * corresponding moment primitive is double-indexed. The interaction matrix \f$ + * L_{m_{ij}} \f$ is obtained by calling vpFeatureMomentBasic::interaction + * (i,j) and is associated to \f$ m_{ij} \f$ obtained by vpMomentBasic::get + * (i,j). vpFeatureMomentBasic computes interaction matrices all interaction + * matrices up to vpMomentObject::getOrder()-1. 
+ * + * \attention The maximum order reached by vpFeatureMomentBasic is NOT the maximum order of the + * vpMomentObject, it is one unit smaller. For example if you define your + * vpMomentObject up to order n then vpFeatureMomentBasic will be able to + * compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with + * \f$ i+j<=n-1 \f$. + * + * You can see an example of vpFeatureMomentBasic by looking at the + * documentation of the vpFeatureMoment class. + * + * This feature depends on: + * - vpMomentBasic + */ class VISP_EXPORT vpFeatureMomentBasic : public vpFeatureMoment { protected: @@ -85,24 +81,26 @@ class VISP_EXPORT vpFeatureMomentBasic : public vpFeatureMoment public: vpFeatureMomentBasic(vpMomentDatabase &moments, double A, double B, double C, vpFeatureMomentDatabase *featureMoments = NULL); - void compute_interaction(); + void compute_interaction() override; #ifndef DOXYGEN_SHOULD_SKIP_THIS /* Add function due to pure virtual definition in vpBasicFeature.h */ - vpMatrix interaction(unsigned int /* select = FEATURE_ALL */) + vpMatrix interaction(unsigned int /* select = FEATURE_ALL */) override { throw vpException(vpException::functionNotImplementedError, "Not implemented!"); } #endif vpMatrix interaction(unsigned int select_one, unsigned int select_two) const; + /*! - Associated moment name. - */ - const char *momentName() const { return "vpMomentBasic"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentBasic"; } + /*! - Feature name. - */ - const char *name() const { return "vpFeatureMomentBasic"; } + * Feature name. 
+ */ + const char *name() const override { return "vpFeatureMomentBasic"; } }; #endif diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h index 9f04fa0a68..466fd87c58 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCInvariant.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentCInvariant.h - \brief Implementation of the interaction matrix computation for - vpMomentCInvariant. -*/ + * \file vpFeatureMomentCInvariant.h + * \brief Implementation of the interaction matrix computation for vpMomentCInvariant. + */ #ifndef _vpFeatureMomentCInvariant_h_ #define _vpFeatureMomentCInvariant_h_ @@ -47,135 +42,134 @@ #ifdef VISP_MOMENTS_COMBINE_MATRICES /*! - \class vpFeatureMomentCInvariant - - \ingroup group_visual_features - - \brief Functionality computation for 2D rotation/translation/scale - non-symmetric invariant moment feature. Computes the interaction matrix - associated with vpMomentCInvariant. - - The interaction matrix for the moment feature can be deduced from \cite - Tahri05z, equations (9). To do so, one must derive them and obtain a - combination of interaction matrices by using (1). It allows to compute the - interaction matrix for \f$ c_i, i \in [1..10] \f$. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentCInvariant::interaction(). 
The selection by the - vpFeatureMomentCInvariant::selectCi method for \f$ L_{c_i} \f$. For example, - to select \f$ L_{c_1} \f$ you should input - vpFeatureMomentCInvariant::selectC1() into ViSP's selector. Special matrices - for features \f$ S_x \f$ and \f$ S_y \f$ are selected by - vpFeatureMomentCInvariant::selectSx() and - vpFeatureMomentCInvariant::selectSy() respectively. Special matrices for - features \f$ P_x \f$ and \f$ P_y \f$ are selected by - vpFeatureMomentCInvariant::selectPx() and - vpFeatureMomentCInvariant::selectPy() respectively. - - These features are often used in moment-based visual servoing to control the - two out-of-plane rotations. - - Be careful about the nature of your object when selecting the right - features. Use \f$ L_{S_{x}} \f$ and \f$ L_{S_{y}} \f$ when you're dealing - with a symmetric object all other features otherwise. - - Minimum vpMomentObject order needed to compute this feature: 6. This is the - highest ordrer required by classic features. - - This feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentCInvariant - - vpFeatureMomentBasic - - An example of how to use vpFeatureMomentCInvariant in a complete visual - servoing example is given in vpFeatureMomentCommon. - -*/ + * \class vpFeatureMomentCInvariant + * + * \ingroup group_visual_features + * + * \brief Functionality computation for 2D rotation/translation/scale + * non-symmetric invariant moment feature. Computes the interaction matrix + * associated with vpMomentCInvariant. + * + * The interaction matrix for the moment feature can be deduced from \cite + * Tahri05z, equations (9). To do so, one must derive them and obtain a + * combination of interaction matrices by using (1). It allows to compute the + * interaction matrix for \f$ c_i, i \in [1..10] \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentCInvariant::interaction(). 
The selection by the + * vpFeatureMomentCInvariant::selectCi method for \f$ L_{c_i} \f$. For example, + * to select \f$ L_{c_1} \f$ you should input + * vpFeatureMomentCInvariant::selectC1() into ViSP's selector. Special matrices + * for features \f$ S_x \f$ and \f$ S_y \f$ are selected by + * vpFeatureMomentCInvariant::selectSx() and + * vpFeatureMomentCInvariant::selectSy() respectively. Special matrices for + * features \f$ P_x \f$ and \f$ P_y \f$ are selected by + * vpFeatureMomentCInvariant::selectPx() and + * vpFeatureMomentCInvariant::selectPy() respectively. + * + * These features are often used in moment-based visual servoing to control the + * two out-of-plane rotations. + * + * Be careful about the nature of your object when selecting the right + * features. Use \f$ L_{S_{x}} \f$ and \f$ L_{S_{y}} \f$ when you're dealing + * with a symmetric object all other features otherwise. + * + * Minimum vpMomentObject order needed to compute this feature: 6. This is the + * highest order required by classic features. + * + * This feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentCInvariant + * - vpFeatureMomentBasic + * + * An example of how to use vpFeatureMomentCInvariant in a complete visual + * servoing example is given in vpFeatureMomentCommon. + */ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param moments : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. 
- \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param moments : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentCInvariant(vpMomentDatabase &moments, double A, double B, double C, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(moments, A, B, C, featureMoments, 16) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! - associated moment name - */ - const char *momentName() const { return "vpMomentCInvariant"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentCInvariant"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentCInvariant"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentCInvariant"; } /*! - Shortcut selector for \f$C_1\f$. - */ + * Shortcut selector for \f$C_1\f$. + */ static unsigned int selectC1() { return 1 << 0; } /*! - Shortcut selector for \f$C_2\f$. - */ + * Shortcut selector for \f$C_2\f$. + */ static unsigned int selectC2() { return 1 << 1; } /*! - Shortcut selector for \f$C_3\f$. - */ + * Shortcut selector for \f$C_3\f$. + */ static unsigned int selectC3() { return 1 << 2; } /*! - Shortcut selector for \f$C_4\f$. - */ + * Shortcut selector for \f$C_4\f$. + */ static unsigned int selectC4() { return 1 << 3; } /*! - Shortcut selector for \f$C_5\f$. - */ + * Shortcut selector for \f$C_5\f$. 
+ */ static unsigned int selectC5() { return 1 << 4; } /*! - Shortcut selector for \f$C_6\f$. - */ + * Shortcut selector for \f$C_6\f$. + */ static unsigned int selectC6() { return 1 << 5; } /*! - Shortcut selector for \f$C_7\f$. - */ + * Shortcut selector for \f$C_7\f$. + */ static unsigned int selectC7() { return 1 << 6; } /*! - Shortcut selector for \f$C_8\f$. - */ + * Shortcut selector for \f$C_8\f$. + */ static unsigned int selectC8() { return 1 << 7; } /*! - Shortcut selector for \f$C_9\f$. - */ + * Shortcut selector for \f$C_9\f$. + */ static unsigned int selectC9() { return 1 << 8; } /*! - Shortcut selector for \f$C_{10}\f$. - */ + * Shortcut selector for \f$C_{10}\f$. + */ static unsigned int selectC10() { return 1 << 9; } /*! - Shortcut selector for \f$S_x\f$. - */ + * Shortcut selector for \f$S_x\f$. + */ static unsigned int selectSx() { return 1 << 10; } /*! - Shortcut selector for \f$S_y\f$. - */ + * Shortcut selector for \f$S_y\f$. + */ static unsigned int selectSy() { return 1 << 11; } /*! - Shortcut selector for \f$P_x\f$. - */ + * Shortcut selector for \f$P_x\f$. + */ static unsigned int selectPx() { return 1 << 12; } /*! - Shortcut selector for \f$P_y\f$. - */ + * Shortcut selector for \f$P_y\f$. + */ static unsigned int selectPy() { return 1 << 13; } }; @@ -183,51 +177,50 @@ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment class vpMomentDatabase; /*! - \class vpFeatureMomentCInvariant - - \ingroup group_visual_features - - \brief Functionality computation for 2D rotation/translation/scale - non-symmetric invariant moment feature. Computes the interaction matrix - associated with vpMomentCInvariant. - - The interaction matrix for the moment feature can be deduced from - \cite Tahri05z, equations (9). To do so, one must derive them and obtain a - combination of interaction matrices by using (1). It allows to compute the - interaction matrix for \f$ c_i, i \in [1..10] \f$. 
- - These interaction matrices may be selected afterwards by calling - vpFeatureMomentCInvariant::interaction(). The selection by the - vpFeatureMomentCInvariant::selectCi method for \f$ L_{c_i} \f$. For example, - to select \f$ L_{c_1} \f$ you should input - vpFeatureMomentCInvariant::selectC1() into ViSP's selector. Special matrices - for features \f$ S_x \f$ and \f$ S_y \f$ are selected by - vpFeatureMomentCInvariant::selectSx() and - vpFeatureMomentCInvariant::selectSy() respectively. Special matrices for - features \f$ P_x \f$ and \f$ P_y \f$ are selected by - vpFeatureMomentCInvariant::selectPx() and - vpFeatureMomentCInvariant::selectPy() respectively. - - These features are often used in moment-based visual servoing to control the - two out-of-plane rotations. - - Be careful about the nature of your object when selecting the right - features. Use \f$ L_{S_{x}} \f$ and \f$ L_{S_{y}} \f$ when you're dealing - with a symmetric object all other features otherwise. - - Minimum vpMomentObject order needed to compute this feature: 6. This is the - highest ordrer required by classic features. - - This feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentCInvariant - - vpFeatureMomentBasic - - An example of how to use vpFeatureMomentCInvariant in a complete visual - servoing example is given in vpFeatureMomentCommon. - -*/ + * \class vpFeatureMomentCInvariant + * + * \ingroup group_visual_features + * + * \brief Functionality computation for 2D rotation/translation/scale + * non-symmetric invariant moment feature. Computes the interaction matrix + * associated with vpMomentCInvariant. + * + * The interaction matrix for the moment feature can be deduced from + * \cite Tahri05z, equations (9). To do so, one must derive them and obtain a + * combination of interaction matrices by using (1). It allows to compute the + * interaction matrix for \f$ c_i, i \in [1..10] \f$. 
+ * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentCInvariant::interaction(). The selection by the + * vpFeatureMomentCInvariant::selectCi method for \f$ L_{c_i} \f$. For example, + * to select \f$ L_{c_1} \f$ you should input + * vpFeatureMomentCInvariant::selectC1() into ViSP's selector. Special matrices + * for features \f$ S_x \f$ and \f$ S_y \f$ are selected by + * vpFeatureMomentCInvariant::selectSx() and + * vpFeatureMomentCInvariant::selectSy() respectively. Special matrices for + * features \f$ P_x \f$ and \f$ P_y \f$ are selected by + * vpFeatureMomentCInvariant::selectPx() and + * vpFeatureMomentCInvariant::selectPy() respectively. + * + * These features are often used in moment-based visual servoing to control the + * two out-of-plane rotations. + * + * Be careful about the nature of your object when selecting the right + * features. Use \f$ L_{S_{x}} \f$ and \f$ L_{S_{y}} \f$ when you're dealing + * with a symmetric object all other features otherwise. + * + * Minimum vpMomentObject order needed to compute this feature: 6. This is the + * highest order required by classic features. + * + * This feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentCInvariant + * - vpFeatureMomentBasic + * + * An example of how to use vpFeatureMomentCInvariant in a complete visual + * servoing example is given in vpFeatureMomentCommon. + */ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment { private: @@ -235,91 +228,89 @@ class VISP_EXPORT vpFeatureMomentCInvariant : public vpFeatureMoment public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. 
- \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentCInvariant(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 16), LI(16) - { - } + { } - void compute_interaction(); + void compute_interaction() override; /*! - associated moment name - */ - const char *momentName() const { return "vpMomentCInvariant"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentCInvariant"; } /*! - feature name - */ - const char *name() const { return "vpFeatureMomentCInvariant"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentCInvariant"; } /*! - Shortcut selector for \f$C_1\f$. - */ + * Shortcut selector for \f$C_1\f$. + */ static unsigned int selectC1() { return 1 << 0; } /*! - Shortcut selector for \f$C_2\f$. - */ + * Shortcut selector for \f$C_2\f$. + */ static unsigned int selectC2() { return 1 << 1; } /*! - Shortcut selector for \f$C_3\f$. 
- */ + * Shortcut selector for \f$C_3\f$. + */ static unsigned int selectC3() { return 1 << 2; } /*! - Shortcut selector for \f$C_4\f$. - */ + * Shortcut selector for \f$C_4\f$. + */ static unsigned int selectC4() { return 1 << 3; } /*! - Shortcut selector for \f$C_5\f$. - */ + * Shortcut selector for \f$C_5\f$. + */ static unsigned int selectC5() { return 1 << 4; } /*! - Shortcut selector for \f$C_6\f$. - */ + * Shortcut selector for \f$C_6\f$. + */ static unsigned int selectC6() { return 1 << 5; } /*! - Shortcut selector for \f$C_7\f$. - */ + * Shortcut selector for \f$C_7\f$. + */ static unsigned int selectC7() { return 1 << 6; } /*! - Shortcut selector for \f$C_8\f$. - */ + * Shortcut selector for \f$C_8\f$. + */ static unsigned int selectC8() { return 1 << 7; } /*! - Shortcut selector for \f$C_9\f$. - */ + * Shortcut selector for \f$C_9\f$. + */ static unsigned int selectC9() { return 1 << 8; } /*! - Shortcut selector for \f$C_{10}\f$. - */ + * Shortcut selector for \f$C_{10}\f$. + */ static unsigned int selectC10() { return 1 << 9; } /*! - Shortcut selector for \f$S_x\f$. - */ + * Shortcut selector for \f$S_x\f$. + */ static unsigned int selectSx() { return 1 << 10; } /*! - Shortcut selector for \f$S_y\f$. - */ + * Shortcut selector for \f$S_y\f$. + */ static unsigned int selectSy() { return 1 << 11; } /*! - Shortcut selector for \f$P_x\f$. - */ + * Shortcut selector for \f$P_x\f$. + */ static unsigned int selectPx() { return 1 << 12; } /*! - Shortcut selector for \f$P_y\f$. - */ + * Shortcut selector for \f$P_y\f$. + */ static unsigned int selectPy() { return 1 << 13; } /*! 
- Print all the interaction matrices of the moment invariants + * Print all the interaction matrices of the moment invariants */ void printLsofInvariants(std::ostream &os) const; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h index 3e19e4324a..2733ea9d0b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * Manikandan Bakthavatchalam - *****************************************************************************/ + */ + /*! - \file vpFeatureMomentCentered.h - \brief Implementation of the interaction matrix computation for - vpMomentCentered. -*/ + * \file vpFeatureMomentCentered.h + * \brief Implementation of the interaction matrix computation for vpMomentCentered. + */ #ifndef _vpFeatureMomentCentered_h_ #define _vpFeatureMomentCentered_h_ @@ -48,41 +43,41 @@ class vpMomentDatabase; /*! - \class vpFeatureMomentCentered - - \ingroup group_visual_features - - \brief Functionality computation for centered moment feature. Computes the - interaction matrix associated with vpMomentCentered. - - The interaction matrix for the feature is defined in \cite Tahri05z, - equation (17). This vpFeatureMoment, as well as it's corresponding moment - primitive is double-indexed. The interaction matrix \f$ L_{\mu_{ij}} \f$ is - obtained by calling vpFeatureMomentBasic::interaction (i,j) and is - associated to \f$ \mu_{ij} \f$ obtained by vpFeatureMomentCentered::get - (i,j). 
- - vpFeatureMomentCentered computes interaction matrices all interaction - matrices up to vpMomentObject::getOrder()-1. \attention The maximum order - reached by vpFeatureMomentBasic is NOT the maximum order of the - vpMomentObject, it is one unit smaller. For example if you define your - vpMomentObject up to order n then vpFeatureMomentBasic will be able to - compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with - \f$ i+j<=n-1 \f$. - - This feature depends on: - - vpFeatureMomentBasic - - vpFeatureMomentGravityCenter - - vpMomentGravityCenter -*/ + * \class vpFeatureMomentCentered + * + * \ingroup group_visual_features + * + * \brief Functionality computation for centered moment feature. Computes the + * interaction matrix associated with vpMomentCentered. + * + * The interaction matrix for the feature is defined in \cite Tahri05z, + * equation (17). This vpFeatureMoment, as well as its corresponding moment + * primitive is double-indexed. The interaction matrix \f$ L_{\mu_{ij}} \f$ is + * obtained by calling vpFeatureMomentBasic::interaction (i,j) and is + * associated to \f$ \mu_{ij} \f$ obtained by vpFeatureMomentCentered::get + * (i,j). + * + * vpFeatureMomentCentered computes all interaction + * matrices up to vpMomentObject::getOrder()-1. \attention The maximum order + * reached by vpFeatureMomentBasic is NOT the maximum order of the + * vpMomentObject, it is one unit smaller. For example if you define your + * vpMomentObject up to order n then vpFeatureMomentBasic will be able to + * compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with + * \f$ i+j<=n-1 \f$. + * + * This feature depends on: + * - vpFeatureMomentBasic + * - vpFeatureMomentGravityCenter + * - vpMomentGravityCenter + */ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment { protected: unsigned int order; /*! 
- Core computation of interaction matrix for moment m_pq - */ + * Core computation of interaction matrix for moment m_pq. + */ vpMatrix compute_Lmu_pq(const unsigned int &p, const unsigned int &q, const double &xg, const double &yg, const vpMatrix &L_xg, const vpMatrix &L_yg, const vpMomentBasic &m, const vpFeatureMomentBasic &feature_moment_m) const; @@ -90,31 +85,32 @@ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment public: vpFeatureMomentCentered(vpMomentDatabase &moments, double A, double B, double C, vpFeatureMomentDatabase *featureMoments = NULL); - void compute_interaction(); + void compute_interaction() override; #ifndef DOXYGEN_SHOULD_SKIP_THIS /* Add function due to pure virtual definition in vpBasicFeature.h */ - vpMatrix interaction(unsigned int /* select = FEATURE_ALL */) + vpMatrix interaction(unsigned int /* select = FEATURE_ALL */) override { throw vpException(vpException::functionNotImplementedError, "Not implemented!"); } #endif /*! - Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment - \param select_one : first index (i) - \param select_two : second index (j) - \return Interaction matrix corresponding to the moment - */ + * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment + * \param select_one : first index (i) + * \param select_two : second index (j) + * \return Interaction matrix corresponding to the moment + */ vpMatrix interaction(unsigned int select_one, unsigned int select_two) const; /*! - associated moment name - */ - const char *momentName() const { return "vpMomentCentered"; } + * Associated moment name + */ + const char *momentName() const override { return "vpMomentCentered"; } + /*! 
- feature name - */ - const char *name() const { return "vpFeatureMomentCentered"; } + * Feature name + */ + const char *name() const override { return "vpFeatureMomentCentered"; } friend VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpFeatureMomentCentered &v); }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCommon.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCommon.h index b5047577fc..39a26f163f 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCommon.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCommon.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -31,17 +30,13 @@ * Description: * Pre-filled pseudo-database used to handle dependencies between common *moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ /*! - \file vpFeatureMomentCommon.h - \brief Pre-filled pseudo-database used to handle dependencies between common - moment features. -*/ + * \file vpFeatureMomentCommon.h + * \brief Pre-filled pseudo-database used to handle dependencies between common + * moment features. + */ #ifndef _vpFeatureMomentCommon_h_ #define _vpFeatureMomentCommon_h_ @@ -59,168 +54,165 @@ class vpMomentDatabase; class vpServo; /*! - \class vpFeatureMomentCommon - - \ingroup group_visual_features - - \brief This class allows to access common vpFeatureMoments in a pre-filled -database. 
- - It is a vpMomentDatabase filled with the following moments: - - vpFeatureMomentGravityCenter - - vpFeatureMomentGravityCenterNormalized - - vpFeatureMomentAreaNormalized - - vpFeatureMomentCInvariant - - vpFeatureMomentAlpha - - vpFeatureMomentCentered - - vpFeatureMomentBasic - - - There is no need to do the linkTo operations manually nor is it necessary to -care about the order of feature computation. - - This class has an vpMomentCommon::updateAll method capable of updating the -plane parameters AND computing interaction matrices inside the features. - - The moment features computed by this class are classical moments - features used in moment-based visual servoing. For more - information see \cite Tahri05z. - - To initialize this feature set, the user needs to supply a vpMomentDatabase -containing at least the contents of vpMomentCommon. - - The features can be retrieved like from a normal vpFeatureMomentDatabase. -However, some shortcuts to retrieve the features are provided. - - \attention Make sure your object is at least of order 6 when using this -pre-filled database. - - The following code demonstrates the construction of a 6x6 interaction matrix -as described in [1]. 
-\code -#include -#include -#include -#include -#include -#include -#include -#include - -int main() -{ - // Define source polygon - vpPoint p; - std::vector vec_p; // vector that contains the vertices of the contour polygon - - p.set_x(-0.2); p.set_y(0.1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(+0.3); p.set_y(0.1); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(+0.2); p.set_y(-0.1); // coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(-0.2); p.set_y(-0.15); // coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - p.set_x(-0.2); p.set_y(0.1); // close the contour (vertex 5 = vertex 1) - vec_p.push_back(p); - - - vpMomentObject src(6); // Create a source moment object with 6 as maximum order - src.setType(vpMomentObject::DENSE_POLYGON); // The object is defined by a countour polygon - src.fromVector(vec_p); // Init the dense object with the source polygon - vec_p.clear(); - - //Define destination polygon. 
This is the source polygon translated - //of 0.1 on x-axis - p.set_x(-0.1); p.set_y(0.1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(+0.4); p.set_y(0.1); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - p.set_x(+0.3); p.set_y(-0.1); // coordinates in meters in the image plane (vertex 3) - vec_p.push_back(p); - p.set_x(-0.1); p.set_y(-0.15); // coordinates in meters in the image plane (vertex 4) - vec_p.push_back(p); - p.set_x(-0.1); p.set_y(0.1); // close the contour (vertex 5 = vertex 1) - vec_p.push_back(p); - - vpMomentObject dst(6); // Create a destination moment object with 6 as maximum order - dst.setType(vpMomentObject::DENSE_POLYGON); // The object is defined by a countour polygon - dst.fromVector(vec_p); // Init the dense object with the destination - // polygon - - - //init classic moment primitives (for source) - vpMomentCommon mdb_src(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),1.); - //Init classic features - vpFeatureMomentCommon fmdb_src(mdb_src); - - ////init classic moment primitives (for destination) - vpMomentCommon mdb_dst(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),1.); - //Init classic features - vpFeatureMomentCommon fmdb_dst(mdb_dst); - - //update+compute moment primitives from object (for source) - mdb_src.updateAll(src); - //update+compute features (+interaction matrices) from plane - fmdb_src.updateAll(0.,0.,1.); - - //update+compute moment primitives from object (for destination) - mdb_dst.updateAll(dst); - //update+compute features (+interaction matrices) from plane - fmdb_dst.updateAll(0.,0.,1.); - - //define visual servoing task - vpServo task; - task.setServo(vpServo::EYEINHAND_CAMERA); - task.setInteractionMatrixType(vpServo::CURRENT); - - //Add all classic features to the task - //In this example, source and destination features are translated by 0.1 - //will produce a 
movement of 0.1 on x-axis. - task.addFeature(fmdb_src.getFeatureGravityNormalized(),fmdb_dst.getFeatureGravityNormalized()); - task.addFeature(fmdb_src.getFeatureAn(),fmdb_dst.getFeatureAn()); - //the object is NOT symmetric - //select C4 and C6 - task.addFeature(fmdb_src.getFeatureCInvariant(),fmdb_dst.getFeatureCInvariant(),(1 << 3) | (1 << 5)); - task.addFeature(fmdb_src.getFeatureAlpha(),fmdb_dst.getFeatureAlpha()); - - task.setLambda(1) ; - vpColVector v = task.computeControlLaw() ; - - task.print(); - - return 0; -} - \endcode -This code produces the following output: -\code -Visual servoing task: -Type of control law -Eye-in-hand configuration -Control in the camera frame -List of visual features : s -0.0166667,-0.00833333, -1, --0.312148,0.0249916, --1.43449, -List of desired visual features : s* -0.116667,-0.00833333, -1, --0.312148,0.0249916, --1.43449, -Interaction Matrix Ls --1 0 -6.938893904e-18 0.007291666667 -1.06875 -0.008333333333 -0 -1 3.469446952e-18 1.0171875 -0.007291666667 -0.01666666667 -0 0 -1 0.0125 0.025 0 -0 0 -4.585529113e-15 -0.2983860943 0.5832596643 -4.376751552e-16 -0 0 -3.58244462e-15 0.08633028234 -0.2484618767 3.63421192e-16 -4.353086256e-17 -1.339411156e-16 -0 -0.03019436997 -0.0168230563 -1 -Error vector (s-s*) --0.1 0 0 1.831867991e-15 -1.072059108e-15 0 -Gain : Zero= 1 Inf= 1 Deriv= 0 - -\endcode -*/ + * \class vpFeatureMomentCommon + * + * \ingroup group_visual_features + * + * \brief This class allows to access common vpFeatureMoments in a pre-filled + * database. + * + * It is a vpMomentDatabase filled with the following moments: + * - vpFeatureMomentGravityCenter + * - vpFeatureMomentGravityCenterNormalized + * - vpFeatureMomentAreaNormalized + * - vpFeatureMomentCInvariant + * - vpFeatureMomentAlpha + * - vpFeatureMomentCentered + * - vpFeatureMomentBasic + * + * There is no need to do the linkTo operations manually nor is it necessary to + * care about the order of feature computation. 
+ * + * This class has an vpMomentCommon::updateAll method capable of updating the + * plane parameters AND computing interaction matrices inside the features. + * + * The moment features computed by this class are classical moments + * features used in moment-based visual servoing. For more + * information see \cite Tahri05z. + * + * To initialize this feature set, the user needs to supply a vpMomentDatabase + * containing at least the contents of vpMomentCommon. + * + * The features can be retrieved like from a normal vpFeatureMomentDatabase. + * However, some shortcuts to retrieve the features are provided. + * + * \attention Make sure your object is at least of order 6 when using this + * pre-filled database. + * + * The following code demonstrates the construction of a 6x6 interaction matrix + * as described in [1]. + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * // Define source polygon + * vpPoint p; + * std::vector vec_p; // vector that contains the vertices of the contour polygon + * + * p.set_x(-0.2); p.set_y(0.1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(+0.3); p.set_y(0.1); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * p.set_x(+0.2); p.set_y(-0.1); // coordinates in meters in the image plane (vertex 3) + * vec_p.push_back(p); + * p.set_x(-0.2); p.set_y(-0.15); // coordinates in meters in the image plane (vertex 4) + * vec_p.push_back(p); + * p.set_x(-0.2); p.set_y(0.1); // close the contour (vertex 5 = vertex 1) + * vec_p.push_back(p); + * + * vpMomentObject src(6); // Create a source moment object with 6 as maximum order + * src.setType(vpMomentObject::DENSE_POLYGON); // The object is defined by a contour polygon + * src.fromVector(vec_p); // Init the dense object with the source polygon + * vec_p.clear(); + * + * //Define destination polygon. 
This is the source polygon translated + * //of 0.1 on x-axis + * p.set_x(-0.1); p.set_y(0.1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(+0.4); p.set_y(0.1); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * p.set_x(+0.3); p.set_y(-0.1); // coordinates in meters in the image plane (vertex 3) + * vec_p.push_back(p); + * p.set_x(-0.1); p.set_y(-0.15); // coordinates in meters in the image plane (vertex 4) + * vec_p.push_back(p); + * p.set_x(-0.1); p.set_y(0.1); // close the contour (vertex 5 = vertex 1) + * vec_p.push_back(p); + * + * vpMomentObject dst(6); // Create a destination moment object with 6 as maximum order + * dst.setType(vpMomentObject::DENSE_POLYGON); // The object is defined by a contour polygon + * dst.fromVector(vec_p); // Init the dense object with the destination + * // polygon + * + * //init classic moment primitives (for source) + * vpMomentCommon mdb_src(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),1.); + * //Init classic features + * vpFeatureMomentCommon fmdb_src(mdb_src); + * + * ////init classic moment primitives (for destination) + * vpMomentCommon mdb_dst(vpMomentCommon::getSurface(dst),vpMomentCommon::getMu3(dst),vpMomentCommon::getAlpha(dst),1.); + * //Init classic features + * vpFeatureMomentCommon fmdb_dst(mdb_dst); + * + * //update+compute moment primitives from object (for source) + * mdb_src.updateAll(src); + * //update+compute features (+interaction matrices) from plane + * fmdb_src.updateAll(0.,0.,1.); + * + * //update+compute moment primitives from object (for destination) + * mdb_dst.updateAll(dst); + * //update+compute features (+interaction matrices) from plane + * fmdb_dst.updateAll(0.,0.,1.); + * + * //define visual servoing task + * vpServo task; + * task.setServo(vpServo::EYEINHAND_CAMERA); + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * //Add all classic features to the task + * //In 
this example, source and destination features are translated by 0.1 + * //will produce a movement of 0.1 on x-axis. + * task.addFeature(fmdb_src.getFeatureGravityNormalized(),fmdb_dst.getFeatureGravityNormalized()); + * task.addFeature(fmdb_src.getFeatureAn(),fmdb_dst.getFeatureAn()); + * //the object is NOT symmetric + * //select C4 and C6 + * task.addFeature(fmdb_src.getFeatureCInvariant(),fmdb_dst.getFeatureCInvariant(),(1 << 3) | (1 << 5)); + * task.addFeature(fmdb_src.getFeatureAlpha(),fmdb_dst.getFeatureAlpha()); + * + * task.setLambda(1) ; + * vpColVector v = task.computeControlLaw() ; + * + * task.print(); + * + * return 0; + * } + * \endcode + * This code produces the following output: + * \code + * Visual servoing task: + * Type of control law + * Eye-in-hand configuration + * Control in the camera frame + * List of visual features : s + * 0.0166667,-0.00833333, + * 1, + * -0.312148,0.0249916, + * -1.43449, + * List of desired visual features : s* + * 0.116667,-0.00833333, + * 1, + * -0.312148,0.0249916, + * -1.43449, + * Interaction Matrix Ls + * -1 0 -6.938893904e-18 0.007291666667 -1.06875 -0.008333333333 + * 0 -1 3.469446952e-18 1.0171875 -0.007291666667 -0.01666666667 + * 0 0 -1 0.0125 0.025 0 + * 0 0 -4.585529113e-15 -0.2983860943 0.5832596643 -4.376751552e-16 + * 0 0 -3.58244462e-15 0.08633028234 -0.2484618767 3.63421192e-16 + * 4.353086256e-17 -1.339411156e-16 -0 -0.03019436997 -0.0168230563 -1 + * Error vector (s-s*) + * -0.1 0 0 1.831867991e-15 -1.072059108e-15 0 + * Gain : Zero= 1 Inf= 1 Deriv= 0 + * + * \endcode + */ class VISP_EXPORT vpFeatureMomentCommon : public vpFeatureMomentDatabase { private: @@ -237,38 +229,41 @@ class VISP_EXPORT vpFeatureMomentCommon : public vpFeatureMomentDatabase vpFeatureMomentCommon(vpMomentDatabase &moments, double A = 0.0, double B = 0.0, double C = 1.0); void updateAll(double A, double B, double C); /*! - Returns alpha. - */ + * Returns alpha. 
+ */ vpFeatureMomentAlpha &getFeatureAlpha() { return featureAlpha; } /*! - Returns normalized surface. - */ + * Returns normalized surface. + */ vpFeatureMomentAreaNormalized &getFeatureAn() { return featureAn; } /*! - Returns basic moment. - */ + * Returns basic moment. + */ vpFeatureMomentBasic &getFeatureMomentBasic() { return featureMomentBasic; } /*! - Returns centered moments. - */ + * Returns centered moments. + */ vpFeatureMomentCentered &getFeatureCentered() { return featureCentered; } /*! - Returns non-symmetric invariants. + * Returns non-symmetric invariants. */ vpFeatureMomentCInvariant &getFeatureCInvariant() { return featureCInvariant; } + /*! - Returns normalized gravity center. - */ + * Returns normalized gravity center. + */ vpFeatureMomentGravityCenterNormalized &getFeatureGravityNormalized() { return featureGravityNormalized; } + /*! - Returns the area - */ + * Returns the area. + */ vpFeatureMomentArea &getFeatureArea() { return feature_moment_area; } + /*! - Returns gravity center - */ + * Returns gravity center. + */ vpFeatureMomentGravityCenter &getFeatureGravityCenter() { return featureGravity; } }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h index 6ee0450bc1..c4856154eb 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentDatabase.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Pseudo-database used to handle dependencies between moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ /*! 
- \file vpFeatureMomentDatabase.h - \brief Pseudo-database used to handle dependencies between moment features. -*/ + * \file vpFeatureMomentDatabase.h + * \brief Pseudo-database used to handle dependencies between moment features. + */ #ifndef _vpFeatureMomentDatabase_h_ #define _vpFeatureMomentDatabase_h_ @@ -52,112 +47,113 @@ class vpFeatureMoment; class vpMomentObject; /*! - \class vpFeatureMomentDatabase - - \ingroup group_visual_features - - \brief This class allows to register all feature moments (implemented in -vpFeatureMoment... classes) so they can access each other according to their -dependencies. - - Like moments (implemented in vpMoment... classes), a vpFeatureMoment needs -to have access to other vpFeatureMoment's values to be computed. In most -cases, a vpFeatureMoment needs both: vpMoments and vpFeatureMoments which -explains the two databases (see vpFeatureMoment::vpFeatureMoment). For example -vpFeatureMomentAlpha needs additionnal information about centered moments -vpMomentCentered AND their interaction matrices obtained by -vpFeatureMomentCentered in order to compute the moment's value from a -vpMomentObject. Like the vpMomentCentered is stored in a vpMomentDatabase, the -vpFeatureMomentCentered should be stored in a vpFeatureMomentDatabase. - - All moment features in a database can access each other freely at any time. -They can also verify if a moment feature is present in the database or not. 
- This code illustrates the use of both databases to handle dependencies -between moment primitives and moment features: - -\code -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -int main() -{ - try { - - vpPoint p; - std::vector vec_p; // vector that contains the vertices - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - vpMomentObject obj(6); // Init object of order 6 because we are - // computing C-invariants - obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object - obj.fromVector(vec_p); - - vpMomentDatabase mdb; // database for moment primitives. This will - // only contain the basic moment. - vpMomentCentered mc; // Centered moment - vpMomentBasic bm; // Basic moment - vpMomentGravityCenter gc; // gravity center - vpMomentCInvariant ci; // C-type invariant - - bm.linkTo(mdb); //add basic moment to moment database - mc.linkTo(mdb); //add centered moment to moment database - gc.linkTo(mdb); //add gravity center to moment database - ci.linkTo(mdb); //add C-invariant to moment database - - vpFeatureMomentDatabase fmdb; // feature moment database to store - // feature dependencies - - // Declare and link moments to database - vpFeatureMomentBasic fmb(mdb,0.,0.,1.,&fmdb); fmb.linkTo(fmdb); - vpFeatureMomentCentered fmc(mdb,0.,0.,1.,&fmdb); fmc.linkTo(fmdb); - vpFeatureMomentCInvariant fci(mdb,0.,0.,1.,&fmdb); fci.linkTo(fmdb); - - // update the whole moment database - mdb.updateAll(obj); - - // Compute moments in the correct order with the object - bm.compute(); - gc.compute(); - mc.compute(); - ci.compute(); - - // update the whole feature moment database with a plane - fmb.update(0.,0.,1.); - fmc.update(0.,0.,1.); - 
fci.update(0.,0.,1.); - - std::cout << fci.interaction(vpFeatureMomentCInvariant::selectC1()) << std::endl; - } - catch(const vpException &e){ - std::cout << e.getMessage() << std::endl; - } - - return 0; -} -\endcode -*/ + * \class vpFeatureMomentDatabase + * + * \ingroup group_visual_features + * + * \brief This class allows to register all feature moments (implemented in + * vpFeatureMoment... classes) so they can access each other according to their + * dependencies. + * + * Like moments (implemented in vpMoment... classes), a vpFeatureMoment needs + * to have access to other vpFeatureMoment's values to be computed. In most + * cases, a vpFeatureMoment needs both: vpMoments and vpFeatureMoments which + * explains the two databases (see vpFeatureMoment::vpFeatureMoment). For example + * vpFeatureMomentAlpha needs additional information about centered moments + * vpMomentCentered AND their interaction matrices obtained by + * vpFeatureMomentCentered in order to compute the moment's value from a + * vpMomentObject. Like the vpMomentCentered is stored in a vpMomentDatabase, the + * vpFeatureMomentCentered should be stored in a vpFeatureMomentDatabase. + * + * All moment features in a database can access each other freely at any time. + * They can also verify if a moment feature is present in the database or not. 
+ * This code illustrates the use of both databases to handle dependencies + * between moment primitives and moment features: + * + * \code + * #include + * + * #include + * #include + * #include + * #include + * #include + * #include + * + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * try { + * + * vpPoint p; + * std::vector vec_p; // vector that contains the vertices + * + * p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * vpMomentObject obj(6); // Init object of order 6 because we are + * // computing C-invariants + * obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object + * obj.fromVector(vec_p); + * + * vpMomentDatabase mdb; // database for moment primitives. This will + * // only contain the basic moment. 
+ * vpMomentCentered mc; // Centered moment + * vpMomentBasic bm; // Basic moment + * vpMomentGravityCenter gc; // gravity center + * vpMomentCInvariant ci; // C-type invariant + * + * bm.linkTo(mdb); //add basic moment to moment database + * mc.linkTo(mdb); //add centered moment to moment database + * gc.linkTo(mdb); //add gravity center to moment database + * ci.linkTo(mdb); //add C-invariant to moment database + * + * vpFeatureMomentDatabase fmdb; // feature moment database to store + * // feature dependencies + * + * // Declare and link moments to database + * vpFeatureMomentBasic fmb(mdb,0.,0.,1.,&fmdb); fmb.linkTo(fmdb); + * vpFeatureMomentCentered fmc(mdb,0.,0.,1.,&fmdb); fmc.linkTo(fmdb); + * vpFeatureMomentCInvariant fci(mdb,0.,0.,1.,&fmdb); fci.linkTo(fmdb); + * + * // update the whole moment database + * mdb.updateAll(obj); + * + * // Compute moments in the correct order with the object + * bm.compute(); + * gc.compute(); + * mc.compute(); + * ci.compute(); + * + * // update the whole feature moment database with a plane + * fmb.update(0.,0.,1.); + * fmc.update(0.,0.,1.); + * fci.update(0.,0.,1.); + * + * std::cout << fci.interaction(vpFeatureMomentCInvariant::selectC1()) << std::endl; + * } + * catch(const vpException &e){ + * std::cout << e.getMessage() << std::endl; + * } + * + * return 0; + * } + * \endcode + */ class VISP_EXPORT vpFeatureMomentDatabase { private: - struct vpCmpStr_t { + struct vpCmpStr_t + { bool operator()(const char *a, const char *b) const { return std::strcmp(a, b) < 0; } char *operator=(const char *) { return NULL; } // Only to avoid a warning under Visual with /Wall flag }; @@ -166,13 +162,15 @@ class VISP_EXPORT vpFeatureMomentDatabase public: /*! - Default constructor. - */ - vpFeatureMomentDatabase() : featureMomentsDataBase() {} + * Default constructor. + */ + vpFeatureMomentDatabase() : featureMomentsDataBase() { } + /*! - Virtual destructor that does nothing. 
- */ - virtual ~vpFeatureMomentDatabase() {} + * Virtual destructor that does nothing. + */ + virtual ~vpFeatureMomentDatabase() { } + virtual void updateAll(double A = 0.0, double B = 0.0, double C = 1.0); vpFeatureMoment &get(const char *type, bool &found); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h index 2a5cea3d14..28c148890a 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenter.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentGravityCenter.h - \brief Implementation of the interaction matrix computation for - vpMomentGravityCenter. -*/ + * \file vpFeatureMomentGravityCenter.h + * \brief Implementation of the interaction matrix computation for + * vpMomentGravityCenter. + */ #ifndef _vpFeatureMomentGravityCenter_h_ #define _vpFeatureMomentGravityCenter_h_ @@ -48,212 +44,215 @@ #ifdef VISP_MOMENTS_COMBINE_MATRICES class vpMomentDatabase; /*! - \class vpFeatureMomentGravityCenter - - \ingroup group_visual_features - - \brief Functionality computation for gravity center moment feature. Computes -the interaction matrix associated with vpMomentGravityCenter. - - The interaction matrix for the is defined in \cite Tahri05z, equation (16). - It allows to compute the interaction matrices for \f$ (x_g,y_g) \f$. 
- - These interaction matrices may be selected afterwards by calling -vpFeatureMomentGravityCenter::interaction(). The selection is done by the -following methods: vpFeatureMomentGravityCenter::selectXg for \f$ L_{x_{g}} -\f$ and vpFeatureMomentGravityCenter::selectYg for \f$ L_{y_{g}} \f$. The -following code demonstrates a selection of \f$ L_{y_{g}} \f$: - - \code -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -int main() -{ - vpPoint p; - std::vector vec_p; // vector that contains the vertices - - p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) - vec_p.push_back(p); - p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) - vec_p.push_back(p); - - //////////////////////////////REFERENCE VALUES//////////////////////////////// - vpMomentObject obj(2); // Init object of order 2 because we need - // vpFeatureMomentBasic of order 1 (for vpFeatureMomentGravityCenter) which - // implies third-order moment primitives - obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object - obj.fromVector(vec_p); - - - vpMomentDatabase mdb; //database for moment primitives. This will - //only contain the basic moment. 
- vpMomentBasic bm; //basic moment (this particular moment is nothing - //more than a shortcut to the vpMomentObject) - vpMomentGravityCenter gc; //gravity center - - bm.linkTo(mdb); //add basic moment to moment database - gc.linkTo(mdb); //add gravity center to moment database - - vpFeatureMomentDatabase fmdb; //feature moment database to store - //feature dependencies - - //Declare and link moments to database - vpFeatureMomentBasic fmb(mdb,0.,0.,1.,&fmdb); fmb.linkTo(fmdb); - vpFeatureMomentGravityCenter fgc(mdb,0.,0.,1.,&fmdb); fgc.linkTo(fmdb); - - //update and compute the vpMomentBasic before computing vpMomentGravityCenter - bm.update(obj); - bm.compute(); - //update and compute the vpMomentGravityCenter before computing vpFeatureMomentBasic - gc.update(obj); - gc.compute(); - - fmb.update(0.,0.,1.); //update the vpFeatureMoment with a plane - //configuration and compute interaction matrix - - fgc.update(0.,0.,1.); //update the plane configuration for gravity - //center feature and compute it's associated matrix. - - std::cout << fgc.interaction(1 << 1) << std::endl; - - return 0; -} - \endcode - - This code produces the following output: - \code -0 -1 1.5 3.5 -2.5 -1.5 - \endcode - - You can also use the shortcut selectors -vpFeatureMomentGravityCenter::selectXg or -vpFeatureMomentGravityCenter::selectYg as follows: - - \code - task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), - vpFeatureMomentGravityCenter::selectXg() | vpFeatureMomentGravityCenter::selectYg()); - \endcode - This feature depends on: - - vpFeatureMomentBasic - - Minimum vpMomentObject order needed to compute this feature: 2. -*/ + * \class vpFeatureMomentGravityCenter + * + * \ingroup group_visual_features + * + * \brief Functionality computation for gravity center moment feature. Computes + * the interaction matrix associated with vpMomentGravityCenter. + * + * The interaction matrix for the is defined in \cite Tahri05z, equation (16). 
+ * It allows to compute the interaction matrices for \f$ (x_g,y_g) \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentGravityCenter::interaction(). The selection is done by the + * following methods: vpFeatureMomentGravityCenter::selectXg for \f$ L_{x_{g}} + * \f$ and vpFeatureMomentGravityCenter::selectYg for \f$ L_{y_{g}} \f$. The + * following code demonstrates a selection of \f$ L_{y_{g}} \f$: + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpPoint p; + * std::vector vec_p; // vector that contains the vertices + * + * p.set_x(1); p.set_y(1); // coordinates in meters in the image plane (vertex 1) + * vec_p.push_back(p); + * p.set_x(2); p.set_y(2); // coordinates in meters in the image plane (vertex 2) + * vec_p.push_back(p); + * + * //////////////////////////////REFERENCE VALUES//////////////////////////////// + * vpMomentObject obj(2); // Init object of order 2 because we need + * // vpFeatureMomentBasic of order 1 (for vpFeatureMomentGravityCenter) which + * // implies third-order moment primitives + * obj.setType(vpMomentObject::DISCRETE); // Discrete mode for object + * obj.fromVector(vec_p); + * + * vpMomentDatabase mdb; //database for moment primitives. This will + * //only contain the basic moment. 
+ * vpMomentBasic bm; //basic moment (this particular moment is nothing + * //more than a shortcut to the vpMomentObject) + * vpMomentGravityCenter gc; //gravity center + * + * bm.linkTo(mdb); //add basic moment to moment database + * gc.linkTo(mdb); //add gravity center to moment database + * + * vpFeatureMomentDatabase fmdb; //feature moment database to store + * //feature dependencies + * + * //Declare and link moments to database + * vpFeatureMomentBasic fmb(mdb,0.,0.,1.,&fmdb); fmb.linkTo(fmdb); + * vpFeatureMomentGravityCenter fgc(mdb,0.,0.,1.,&fmdb); fgc.linkTo(fmdb); + * + * //update and compute the vpMomentBasic before computing vpMomentGravityCenter + * bm.update(obj); + * bm.compute(); + * //update and compute the vpMomentGravityCenter before computing vpFeatureMomentBasic + * gc.update(obj); + * gc.compute(); + * + * fmb.update(0.,0.,1.); //update the vpFeatureMoment with a plane + * //configuration and compute interaction matrix + * + * fgc.update(0.,0.,1.); //update the plane configuration for gravity + * //center feature and compute its associated matrix. + * + * std::cout << fgc.interaction(1 << 1) << std::endl; + * + * return 0; + * } + * \endcode + * + * This code produces the following output: + * \code + * 0 -1 1.5 3.5 -2.5 -1.5 + * \endcode + * + * You can also use the shortcut selectors + * vpFeatureMomentGravityCenter::selectXg or + * vpFeatureMomentGravityCenter::selectYg as follows: + * + * \code + * task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), + * vpFeatureMomentGravityCenter::selectXg() | vpFeatureMomentGravityCenter::selectYg()); + * \endcode + * This feature depends on: + * - vpFeatureMomentBasic + * + * Minimum vpMomentObject order needed to compute this feature: 2. + */ class VISP_EXPORT vpFeatureMomentGravityCenter : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database.
- \param database : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param database : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentGravityCenter(vpMomentDatabase &database, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 2) - { - } - void compute_interaction(); + { } + + void compute_interaction() override; + /*! - Associated moment name. - */ - const char *momentName() const { return "vpMomentGravityCenter"; } + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentGravityCenter"; } + /*! - Feature name. - */ - const char *name() const { return "vpFeatureMomentGravityCenter"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentGravityCenter"; } /*! - Shortcut selector for \f$x_g\f$. - */ + * Shortcut selector for \f$x_g\f$. 
+ */ static unsigned int selectXg() { return 1 << 0; } /*! - Shortcut selector for \f$y_g\f$. - */ + * Shortcut selector for \f$y_g\f$. + */ static unsigned int selectYg() { return 1 << 1; } }; #else class vpMomentDatabase; /*! - \class vpFeatureMomentGravityCenter - - \ingroup group_visual_features - - \brief Functionality computation for gravity center moment feature. Computes - the interaction matrix associated with vpMomentGravityCenter. - - The interaction matrix for the is defined in \cite Tahri05z, equation (16). - It allows to compute the interaction matrices for \f$ (x_g,y_g) \f$. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentGravityCenter::interaction(). The selection is done by the - following methods: vpFeatureMomentGravityCenter::selectXg for \f$ L_{x_{g}} - \f$ and vpFeatureMomentGravityCenter::selectYg for \f$ L_{y_{g}} \f$. - - You can use the selectors vpFeatureMomentGravityCenter::selectXg or - vpFeatureMomentGravityCenter::selectYg as follows: - - \code - task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), - vpFeatureMomentGravityCenter::selectXg() | vpFeatureMomentGravityCenter::selectYg()); - \endcode - This feature depends on: - - vpMomentCentered - - vpMomentGravityCenter - - Minimum vpMomentObject order needed to compute this feature: 2. -*/ + * \class vpFeatureMomentGravityCenter + * + * \ingroup group_visual_features + * + * \brief Functionality computation for gravity center moment feature. Computes + * the interaction matrix associated with vpMomentGravityCenter. + * + * The interaction matrix for the is defined in \cite Tahri05z, equation (16). + * It allows to compute the interaction matrices for \f$ (x_g,y_g) \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentGravityCenter::interaction(). 
The selection is done by the + * following methods: vpFeatureMomentGravityCenter::selectXg for \f$ L_{x_{g}} + * \f$ and vpFeatureMomentGravityCenter::selectYg for \f$ L_{y_{g}} \f$. + * + * You can use the selectors vpFeatureMomentGravityCenter::selectXg or + * vpFeatureMomentGravityCenter::selectYg as follows: + * + * \code + * task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), + * vpFeatureMomentGravityCenter::selectXg() | vpFeatureMomentGravityCenter::selectYg()); + * \endcode + * This feature depends on: + * - vpMomentCentered + * - vpMomentGravityCenter + * + * Minimum vpMomentObject order needed to compute this feature: 2. + */ class VISP_EXPORT vpFeatureMomentGravityCenter : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. 
+ * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentGravityCenter(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 2) - { - } - void compute_interaction(); + { } + + void compute_interaction() override; + /*! - Associated moment name. + * Associated moment name. + */ + const char *momentName() const override { return "vpMomentGravityCenter"; } + + /*! + * Feature name. */ - const char *momentName() const { return "vpMomentGravityCenter"; } - /*! - Feature name. - */ - const char *name() const { return "vpFeatureMomentGravityCenter"; } + const char *name() const override { return "vpFeatureMomentGravityCenter"; } /*! - Shortcut selector for \f$x_g\f$. - */ + * Shortcut selector for \f$x_g\f$. + */ static unsigned int selectXg() { return 1 << 0; } /*! - Shortcut selector for \f$y_g\f$. - */ + * Shortcut selector for \f$y_g\f$. + */ static unsigned int selectYg() { return 1 << 1; } }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h index dccfc21dcb..eb408e2b97 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentGravityCenterNormalized.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,13 @@ * * Description: * Implementation for all supported moment features. 
- * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ + /*! - \file vpFeatureMomentGravityCenterNormalized.h - \brief Implementation of the interaction matrix computation for - vpMomentGravityCenterNormalized. -*/ + * \file vpFeatureMomentGravityCenterNormalized.h + * \brief Implementation of the interaction matrix computation for + * vpMomentGravityCenterNormalized. + */ #ifndef _vpFeatureMomentGravityCenterNormalized_h_ #define _vpFeatureMomentGravityCenterNormalized_h_ @@ -48,230 +44,228 @@ #ifdef VISP_MOMENTS_COMBINE_MATRICES class vpMomentDatabase; /*! - \class vpFeatureMomentGravityCenterNormalized - - \ingroup group_visual_features - - \brief Functionality computation for centered and normalized moment feature. - Computes the interaction matrix associated with - vpMomentGravityCenterNormalized. - - The interaction matrix for the moment feature can be deduced from \cite - Tahri05z, equation (19). To do so, one must derive it and obtain a - combination of interaction matrices by using (1). It allows to compute the - interaction matrices for \f$ (x_n,y_n) \f$. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentGravityCenterNormalized::interaction. The selection is done - by the following methods: vpFeatureMomentGravityCenterNormalized::selectXn - for \f$ L_{x_{n}} \f$ and vpFeatureMomentGravityCenterNormalized::selectYn - for \f$ L_{y_{n}} \f$. You can use these shortcut selectors as follows: - - \code - task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), - vpFeatureMomentGravityCenterNormalized::selectXn() | - vpFeatureMomentGravityCenterNormalized::selectYn()); \endcode - - The behaviour of this feature is very similar to - vpFeatureMomentGravityCenter which also contains a sample code demonstrating - a selection. 
- - This feature is often used in moment-based visual servoing to control the - planar translation parameters. - - Minimum vpMomentObject order needed to compute this feature: 2 in dense mode - and 3 in discrete mode. - - This feature depends on: - - vpFeatureMomentGravityCenter - - vpMomentGravityCenter - - vpMomentAreaNormalized - - vpFeatureMomentAreaNormalized - -*/ + * \class vpFeatureMomentGravityCenterNormalized + * + * \ingroup group_visual_features + * + * \brief Functionality computation for centered and normalized moment feature. + * Computes the interaction matrix associated with + * vpMomentGravityCenterNormalized. + * + * The interaction matrix for the moment feature can be deduced from \cite + * Tahri05z, equation (19). To do so, one must derive it and obtain a + * combination of interaction matrices by using (1). It allows to compute the + * interaction matrices for \f$ (x_n,y_n) \f$. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentGravityCenterNormalized::interaction. The selection is done + * by the following methods: vpFeatureMomentGravityCenterNormalized::selectXn + * for \f$ L_{x_{n}} \f$ and vpFeatureMomentGravityCenterNormalized::selectYn + * for \f$ L_{y_{n}} \f$. You can use these shortcut selectors as follows: + * + * \code + * task.addFeature(db_src.getFeatureGravityNormalized(), db_dst.getFeatureGravityNormalized(), + * vpFeatureMomentGravityCenterNormalized::selectXn() | vpFeatureMomentGravityCenterNormalized::selectYn()); + * \endcode + * + * The behaviour of this feature is very similar to + * vpFeatureMomentGravityCenter which also contains a sample code demonstrating + * a selection. + * + * This feature is often used in moment-based visual servoing to control the + * planar translation parameters. + * + * Minimum vpMomentObject order needed to compute this feature: 2 in dense mode + * and 3 in discrete mode. 
+ * + * This feature depends on: + * - vpFeatureMomentGravityCenter + * - vpMomentGravityCenter + * - vpMomentAreaNormalized + * - vpFeatureMomentAreaNormalized + */ class VISP_EXPORT vpFeatureMomentGravityCenterNormalized : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param database : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param database : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentGravityCenterNormalized(vpMomentDatabase &database, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(database, A_, B_, C_, featureMoments, 2) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! - associated moment name - */ - const char *momentName() const { return "vpMomentGravityCenterNormalized"; } + * Associated moment name. 
+ */ + const char *momentName() const override { return "vpMomentGravityCenterNormalized"; } + /*! - feature name - */ - const char *name() const { return "vpFeatureMomentGravityCenterNormalized"; } + * Feature name. + */ + const char *name() const override { return "vpFeatureMomentGravityCenterNormalized"; } /*! - Shortcut selector for \f$x_n\f$. - */ + * Shortcut selector for \f$x_n\f$. + */ static unsigned int selectXn() { return 1 << 0; } /*! - Shortcut selector for \f$y_n\f$. - */ + * Shortcut selector for \f$y_n\f$. + */ static unsigned int selectYn() { return 1 << 1; } }; #else class vpMomentDatabase; /*! - \class vpFeatureMomentGravityCenterNormalized - - \ingroup group_visual_features - - \brief Functionality computation for centered and normalized moment feature. - Computes the interaction matrix associated with vpMomentGravityCenterNormalized. - - It computes the interaction matrices for \f$ (x_n,y_n) \f$. - The interaction matrix for the moment feature has the following expression: - - In the discrete case: - \f[ - L_{x_n} = - { - \left[ - \begin{array}{c} - -Ax_{{n}}\theta+ \left( x_{{n}}e_{{1,1}}-y_{{n}} \right) B-a_{{n}}C \\ - \noalign{\medskip}Ax_{{n}}e_{{1,1}}+Bx_{{n}} \theta \\ - \noalign{\medskip} - \left( - -a_{{n}}-w_{{y}} - \right) - A+Bw_{{x}} \\ - \noalign{\medskip}a_{{n}}e_{{1,1}}{\it NA}+ \left( \eta_{{1,0}}e_{{1,1}}+\eta_{{0,1}}-e_{{2,1}}-x_{{g}}e_{{1,1}}+\eta_{{0,1}}\theta \right) x_{{n}}+ \left( \eta_{{1,0}}-x_{{g}}\theta \right) y_{{n}}-{\frac {x_{{n}}\eta_{{0,3}}}{{\it NA}}} \\ - \noalign{\medskip} \left( -1+\theta \right) a_{{n}}{\it NA}+ \left( e_{{1,2}}+x_{{g}}-\eta_{{0,1}}e_{{1,1}}-2\,\eta_{{1,0}}+e_{{3,0}}+ \left( -x_{{g}}+\eta_{{1,0}} \right) \theta \right) x_{{n}}+e_{{1,1}}x_{{g}}y_{{n}}-a_{{n}} \\ - \noalign{\medskip}y_{{n}} - \end{array} - \right] - }^t - \f] - - \f[ - L_{y_n} = - { - \left[ - \begin{array}{c} - \left( 1-\theta \right) y_{{n}}A+y_{{n}}e_{{1,1}}B \\ - \noalign{\medskip} \left( -x_{{n}}+y_{{n}}e_{{1,1}} - 
\right) - A+ \left( -1+\theta \right) y_{{n}}B-a_{{n}}C \\ - \noalign{\medskip}-Aw_{{y}}+ \left( -a_{{n}}+w_{{x}} \right) B \\ - \noalign{\medskip}\theta\,a_{{n}}{\it NA}+ - \left( -e_{{2,1}}+\eta_{{1,0}}e_{{1,1}}+\eta_{{0,1}}-x_{{g}}e_{{1,1}}+ \left( \eta_{{0,1}}-y_{{g}} \right) \theta - \right) y_{{n}}+a_{{n}}-{\frac {y_{{n}}\eta_{{0,3}}}{{\it NA}}} \\ - \noalign{\medskip}-a_{{n}}e_{{1,1}}{\it NA}-x_{{n}}\eta_{{0,1}}+\left( e_{{1,2}}+y_{{g}}e_{{1,1}}-\eta_{{0,1}}e_{{1,1}}+x_{{g}}+e_{{3,0}}-2\,\eta_{{1,0}}+ \left( -x_{{g}}+\eta_{{1,0}} \right) \theta - \right) y_{{n}} \\ - \noalign{\medskip}-x_{{n}} - \end{array} - \right] - }^t - \f] - - In the dense case: - \f[ - L_{x_n} = - { - \left[ - \begin {array}{c} -a_{{n}}C-1/2\,Ax_{{n}}-By_{{n}} \\ - \noalign{\medskip}1/2\,Bx_{{n}} \\ - \noalign{\medskip} \left( -a_{{n}}-w_{{y}} \right) A+Bw_{{x}} \\ - \noalign{\medskip} \left( 4\,\eta_{{1,0}}-1/2\,x_{{g}} \right) y_{{n}}+4\,a_{{n}}\eta_{{1,1}}+4\,x_{{n}}\eta_{{0,1}} \\ - \noalign{\medskip} \left( -4\,\eta_{{1,0}}+1/2\,x_{{g}} \right) x_{{n}}+ \left( -1-4\,\eta_{{2,0}} \right) a_{{n}} \\ - \noalign{\medskip}y_{{n}}\end {array} - \right] - }^t - L_{y_n} = - { - \left[ - \begin {array}{c} - 1/2\,Ay_{{n}} \\ - \noalign{\medskip}-1/2\,By_{{n}}-a_{{n}}C-Ax_{{n}} \\ - \noalign{\medskip}-Aw_{{y}}+ \left( -a_{{n}}+w_{{x}} \right) B \\ - \noalign{\medskip}4\,\theta\,a_{{n}}{\it NA}+ \left( 4\,\eta_{{0,1}}-1/2\,y_{{g}} \right) y_{{n}}+a_{{n}} \\ - \noalign{\medskip} \left( -4\,\eta_{{1,0}}+1/2\,x_{{g}} \right) y_{{n}}-4\,a_{{n}}\eta_{{1,1}}-4\,x_{{n}}\eta_{{0,1}} \\ - \noalign{\medskip}-x_{{n}} - \end {array} - \right] - }^t - \f] - with: - - \f$e_{i,j}=\frac{\mu_{i,j}}{NA}\f$ - - \f$NA=\mu_{2,0}+\mu_{0,2}\f$ - - \f$\theta=\frac{\eta_{0,2}}{NA}\f$ - - \f$\eta\f$ is the centered and normalized moment. - - These interaction matrices may be selected afterwards by calling - vpFeatureMomentGravityCenterNormalized::interaction. 
The selection is done by - the following methods: vpFeatureMomentGravityCenterNormalized::selectXn for - \f$ L_{x_{n}} \f$ and vpFeatureMomentGravityCenterNormalized::selectYn for \f$L_{y_{n}} \f$. - You can use these shortcut selectors as follows: - - \code - task.addFeature(db_src.getFeatureGravityNormalized(),db_dst.getFeatureGravityNormalized(), - vpFeatureMomentGravityCenterNormalized::selectXn() | - vpFeatureMomentGravityCenterNormalized::selectYn()); - \endcode - - The behaviour of this feature is very similar to - vpFeatureMomentGravityCenter which also contains a sample code demonstrating a - selection. - - This feature is often used in moment-based visual servoing to control the - planar translation parameters. - - Minimum vpMomentObject order needed to compute this feature: 2 in dense mode - and 3 in discrete mode. - - This feature depends on: - - vpFeatureMomentGravityCenter - - vpMomentGravityCenter - - vpMomentAreaNormalized - - vpFeatureMomentAreaNormalized - -*/ + * \class vpFeatureMomentGravityCenterNormalized + * + * \ingroup group_visual_features + * + * \brief Functionality computation for centered and normalized moment feature. + * Computes the interaction matrix associated with vpMomentGravityCenterNormalized. + * + * It computes the interaction matrices for \f$ (x_n,y_n) \f$. 
+ * The interaction matrix for the moment feature has the following expression: + * - In the discrete case: + * \f[ + * L_{x_n} = + * { + * \left[ + * \begin{array}{c} + * -Ax_{{n}}\theta+ \left( x_{{n}}e_{{1,1}}-y_{{n}} \right) B-a_{{n}}C \\ + * \noalign{\medskip}Ax_{{n}}e_{{1,1}}+Bx_{{n}} \theta \\ + * \noalign{\medskip} + * \left( + * -a_{{n}}-w_{{y}} + * \right) + * A+Bw_{{x}} \\ + * \noalign{\medskip}a_{{n}}e_{{1,1}}{\it NA}+ \left( \eta_{{1,0}}e_{{1,1}}+\eta_{{0,1}}-e_{{2,1}}-x_{{g}}e_{{1,1}}+\eta_{{0,1}}\theta \right) x_{{n}}+ \left( \eta_{{1,0}}-x_{{g}}\theta \right) y_{{n}}-{\frac {x_{{n}}\eta_{{0,3}}}{{\it NA}}} \\ + * \noalign{\medskip} \left( -1+\theta \right) a_{{n}}{\it NA}+ \left( e_{{1,2}}+x_{{g}}-\eta_{{0,1}}e_{{1,1}}-2\,\eta_{{1,0}}+e_{{3,0}}+ \left( -x_{{g}}+\eta_{{1,0}} \right) \theta \right) x_{{n}}+e_{{1,1}}x_{{g}}y_{{n}}-a_{{n}} \\ + * \noalign{\medskip}y_{{n}} + * \end{array} + * \right] + * }^t + * \f] + * + * \f[ + * L_{y_n} = + * { + * \left[ + * \begin{array}{c} + * \left( 1-\theta \right) y_{{n}}A+y_{{n}}e_{{1,1}}B \\ + * \noalign{\medskip} \left( -x_{{n}}+y_{{n}}e_{{1,1}} + * \right) + * A+ \left( -1+\theta \right) y_{{n}}B-a_{{n}}C \\ + * \noalign{\medskip}-Aw_{{y}}+ \left( -a_{{n}}+w_{{x}} \right) B \\ + * \noalign{\medskip}\theta\,a_{{n}}{\it NA}+ + * \left( -e_{{2,1}}+\eta_{{1,0}}e_{{1,1}}+\eta_{{0,1}}-x_{{g}}e_{{1,1}}+ \left( \eta_{{0,1}}-y_{{g}} \right) \theta + * \right) y_{{n}}+a_{{n}}-{\frac {y_{{n}}\eta_{{0,3}}}{{\it NA}}} \\ + * \noalign{\medskip}-a_{{n}}e_{{1,1}}{\it NA}-x_{{n}}\eta_{{0,1}}+\left( e_{{1,2}}+y_{{g}}e_{{1,1}}-\eta_{{0,1}}e_{{1,1}}+x_{{g}}+e_{{3,0}}-2\,\eta_{{1,0}}+ \left( -x_{{g}}+\eta_{{1,0}} \right) \theta + * \right) y_{{n}} \\ + * \noalign{\medskip}-x_{{n}} + * \end{array} + * \right] + * }^t + * \f] + * + * - In the dense case: + * \f[ + * L_{x_n} = + * { + * \left[ + * \begin {array}{c} -a_{{n}}C-1/2\,Ax_{{n}}-By_{{n}} \\ + * \noalign{\medskip}1/2\,Bx_{{n}} \\ + * \noalign{\medskip} \left( 
-a_{{n}}-w_{{y}} \right) A+Bw_{{x}} \\ + * \noalign{\medskip} \left( 4\,\eta_{{1,0}}-1/2\,x_{{g}} \right) y_{{n}}+4\,a_{{n}}\eta_{{1,1}}+4\,x_{{n}}\eta_{{0,1}} \\ + * \noalign{\medskip} \left( -4\,\eta_{{1,0}}+1/2\,x_{{g}} \right) x_{{n}}+ \left( -1-4\,\eta_{{2,0}} \right) a_{{n}} \\ + * \noalign{\medskip}y_{{n}}\end {array} + * \right] + * }^t + * L_{y_n} = + * { + * \left[ + * \begin {array}{c} + * 1/2\,Ay_{{n}} \\ + * \noalign{\medskip}-1/2\,By_{{n}}-a_{{n}}C-Ax_{{n}} \\ + * \noalign{\medskip}-Aw_{{y}}+ \left( -a_{{n}}+w_{{x}} \right) B \\ + * \noalign{\medskip}4\,\theta\,a_{{n}}{\it NA}+ \left( 4\,\eta_{{0,1}}-1/2\,y_{{g}} \right) y_{{n}}+a_{{n}} \\ + * \noalign{\medskip} \left( -4\,\eta_{{1,0}}+1/2\,x_{{g}} \right) y_{{n}}-4\,a_{{n}}\eta_{{1,1}}-4\,x_{{n}}\eta_{{0,1}} \\ + * \noalign{\medskip}-x_{{n}} + * \end {array} + * \right] + * }^t + * \f] + * with: + * - \f$e_{i,j}=\frac{\mu_{i,j}}{NA}\f$ + * - \f$NA=\mu_{2,0}+\mu_{0,2}\f$ + * - \f$\theta=\frac{\eta_{0,2}}{NA}\f$ + * - \f$\eta\f$ is the centered and normalized moment. + * + * These interaction matrices may be selected afterwards by calling + * vpFeatureMomentGravityCenterNormalized::interaction. The selection is done by + * the following methods: vpFeatureMomentGravityCenterNormalized::selectXn for + * \f$ L_{x_{n}} \f$ and vpFeatureMomentGravityCenterNormalized::selectYn for \f$L_{y_{n}} \f$. + * You can use these shortcut selectors as follows: + * + * \code + * task.addFeature(db_src.getFeatureGravityNormalized(),db_dst.getFeatureGravityNormalized(), + * vpFeatureMomentGravityCenterNormalized::selectXn() | vpFeatureMomentGravityCenterNormalized::selectYn()); + * \endcode + * + * The behaviour of this feature is very similar to + * vpFeatureMomentGravityCenter which also contains a sample code demonstrating a + * selection. + * + * This feature is often used in moment-based visual servoing to control the + * planar translation parameters. 
+ * + * Minimum vpMomentObject order needed to compute this feature: 2 in dense mode + * and 3 in discrete mode. + * + * This feature depends on: + * - vpFeatureMomentGravityCenter + * - vpMomentGravityCenter + * - vpMomentAreaNormalized + * - vpFeatureMomentAreaNormalized + */ class VISP_EXPORT vpFeatureMomentGravityCenterNormalized : public vpFeatureMoment { public: /*! - Initializes the feature with information about the database of moment - primitives, the object plane and feature database. - \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. - It is used to access different moment values later used to compute the final matrix. - \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. - \param featureMoments : Feature database. - - */ + * Initializes the feature with information about the database of moment + * primitives, the object plane and feature database. + * \param data_base : Moment database. The database of moment primitives (first parameter) is mandatory. + * It is used to access different moment values later used to compute the final matrix. + * \param A_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param B_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param C_ : Plane coefficient in a \f$ A \times x+B \times y + C = \frac{1}{Z} \f$ plane. + * \param featureMoments : Feature database. + */ vpFeatureMomentGravityCenterNormalized(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments = NULL) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments, 2) - { - } - void compute_interaction(); + { } + void compute_interaction() override; + /*! * Associated moment name. 
*/ - const char *momentName() const { return "vpMomentGravityCenterNormalized"; } + const char *momentName() const override { return "vpMomentGravityCenterNormalized"; } + /*! - * feature name + * Feature name. */ - const char *name() const { return "vpFeatureMomentGravityCenterNormalized"; } + const char *name() const override { return "vpFeatureMomentGravityCenterNormalized"; } /*! * Shortcut selector for \f$x_n\f$. @@ -279,7 +273,7 @@ class VISP_EXPORT vpFeatureMomentGravityCenterNormalized : public vpFeatureMomen static unsigned int selectXn() { return 1 << 0; } /*! - * Shortcut selector for \f$y_n\f$. + * Shortcut selector for \f$y_n\f$. */ static unsigned int selectYn() { return 1 << 1; } }; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeaturePoint.h b/modules/visual_features/include/visp3/visual_features/vpFeaturePoint.h index 6d55e9faff..5949fab755 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeaturePoint.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeaturePoint.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D point visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeaturePoint_H #define vpFeaturePoint_H /*! - \file vpFeaturePoint.h - \brief Class that defines 2D point visual feature -*/ + * \file vpFeaturePoint.h + * \brief Class that defines 2D point visual feature + */ #include #include @@ -49,132 +47,130 @@ #include /*! - \class vpFeaturePoint - \ingroup group_visual_features - - \brief Class that defines a 2D point visual feature \f$ s\f$ which - is composed by two parameters that are the cartesian coordinates \f$ - x \f$ and \f$ y \f$. 
- - In this class \f$ x \f$ and \f$ y \f$ are the 2D coordinates in the - image plane and are given in meter. \f$ Z \f$ which is the 3D - coordinate representing the depth is also a parameter of the - point. It is needed during the computation of the interaction matrix - \f$ L \f$. - - The visual features can be set easily from an instance of the - classes vpPoint, vpDot or vpDot2. For more precision see the - vpFeatureBuilder class. - - Once the values of the visual features are set, the interaction() - method allows to compute the interaction matrix \f$ L \f$ associated - to the visual feature, while the error() method computes the error - vector \f$(s - s^*)\f$ between the current visual feature and the - desired one. - - The code below shows how to create a eye-in hand visual servoing - task using a 2D point feature \f$(x,y)\f$ that correspond to the 2D - coordinates of a point in the image plane. To control six degrees - of freedom, at least four other features must be considered like two - other point features for example. First we create a current - (\f$s\f$) 2D point feature. Then we set the task to use the - interaction matrix associated to the current feature \f$L_s\f$. And - finally we compute the camera velocity \f$v=-\lambda \; L_s^+ \; - (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() - loop. - - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpFeaturePoint sd; //The desired point feature. - //Set the desired features x and y - double xd = 0; - double yd = 0; - //Set the depth of the point in the camera frame. - double Zd = 1; - //Set the point feature thanks to the desired parameters. - sd.buildFrom(xd, yd, Zd); - - vpFeaturePoint s; //The current point feature. - //Set the current features x and y - double x; //You have to compute the value of x. - double y; //You have to compute the value of y. - double Z; //You have to compute the value of Z. 
- //Set the point feature thanks to the current parameters. - s.buildFrom(x, y, Z); - //In this case the parameter Z is not necessary because the interaction matrix is computed - //with the desired visual feature. - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the desired visual features sd - task.setInteractionMatrixType(vpServo::DESIRED); - - // Add the 2D point feature to the task - task.addFeature(s, sd); - - // Control loop - for ( ; ; ) { - // The new parameters x and y must be computed here. - - // Update the current point visual feature - s.buildFrom(x, y, Z); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - - If you want to build your own control law, this other example shows how - to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual - feature, compute the corresponding error vector \f$(s-s^*)\f$ and finally - build the interaction matrix \f$L_s\f$. - - \code -#include -#include - -int main() -{ - vpFeaturePoint sd; //The desired point feature. - //Set the desired features x and y - double xd = 0; - double yd = 0; - //Set the depth of the point in the camera frame. - double Zd = 1; - //Set the point feature thanks to the desired parameters. - sd.buildFrom(xd, yd, Zd); - - vpFeaturePoint s; //The current point feature. - //Set the current features x and y - double x; //You have to compute the value of x. - double y; //You have to compute the value of y. - double Z; //You have to compute the value of Z. - //Set the point feature thanks to the current parameters. 
- s.buildFrom(x, y, Z); - - // Compute the interaction matrix L_s for the current point feature - vpMatrix L = s.interaction(); - // You can also compute the interaction matrix L_s for the desired point feature - // The corresponding line of code is : vpMatrix L = sd.interaction(); - - // Compute the error vector (s-sd) for the point feature - s.error(s_star); -} - \endcode - - An other fully explained example is given in the \ref tutorial-ibvs. - -*/ - + * \class vpFeaturePoint + * \ingroup group_visual_features + * + * \brief Class that defines a 2D point visual feature \f$ s\f$ which + * is composed by two parameters that are the cartesian coordinates \f$ + * x \f$ and \f$ y \f$. + * + * In this class \f$ x \f$ and \f$ y \f$ are the 2D coordinates in the + * image plane and are given in meter. \f$ Z \f$ which is the 3D + * coordinate representing the depth is also a parameter of the + * point. It is needed during the computation of the interaction matrix + * \f$ L \f$. + * + * The visual features can be set easily from an instance of the + * classes vpPoint, vpDot or vpDot2. For more precision see the + * vpFeatureBuilder class. + * + * Once the values of the visual features are set, the interaction() + * method allows to compute the interaction matrix \f$ L \f$ associated + * to the visual feature, while the error() method computes the error + * vector \f$(s - s^*)\f$ between the current visual feature and the + * desired one. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 2D point feature \f$(x,y)\f$ that correspond to the 2D + * coordinates of a point in the image plane. To control six degrees + * of freedom, at least four other features must be considered like two + * other point features for example. First we create a current + * (\f$s\f$) 2D point feature. Then we set the task to use the + * interaction matrix associated to the current feature \f$L_s\f$. 
And + * finally we compute the camera velocity \f$v=-\lambda \; L_s^+ \; + * (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() + * loop. + * + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpFeaturePoint sd; //The desired point feature. + * // Set the desired features x and y + * double xd = 0; + * double yd = 0; + * // Set the depth of the point in the camera frame. + * double Zd = 1; + * // Set the point feature thanks to the desired parameters. + * sd.buildFrom(xd, yd, Zd); + * + * vpFeaturePoint s; //The current point feature. + * // Set the current features x and y + * double x; // You have to compute the value of x. + * double y; // You have to compute the value of y. + * double Z; // You have to compute the value of Z. + * // Set the point feature thanks to the current parameters. + * s.buildFrom(x, y, Z); + * // In this case the parameter Z is not necessary because the interaction matrix is computed + * // with the desired visual feature. + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the desired visual features sd + * task.setInteractionMatrixType(vpServo::DESIRED); + * + * // Add the 2D point feature to the task + * task.addFeature(s, sd); + * + * // Control loop + * for ( ; ; ) { + * // The new parameters x and y must be computed here. + * + * // Update the current point visual feature + * s.buildFrom(x, y, Z); + * + * // Compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * If you want to build your own control law, this other example shows how + * to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual + * feature, compute the corresponding error vector \f$(s-s^*)\f$ and finally + * build the interaction matrix \f$L_s\f$. 
+ * + * \code + * #include + * #include + * + * int main() + * { + * vpFeaturePoint sd; //The desired point feature. + * // Set the desired features x and y + * double xd = 0; + * double yd = 0; + * // Set the depth of the point in the camera frame. + * double Zd = 1; + * // Set the point feature thanks to the desired parameters. + * sd.buildFrom(xd, yd, Zd); + * + * vpFeaturePoint s; //The current point feature. + * // Set the current features x and y + * double x; // You have to compute the value of x. + * double y; // You have to compute the value of y. + * double Z; // You have to compute the value of Z. + * // Set the point feature thanks to the current parameters. + * s.buildFrom(x, y, Z); + * + * // Compute the interaction matrix L_s for the current point feature + * vpMatrix L = s.interaction(); + * // You can also compute the interaction matrix L_s for the desired point feature + * // The corresponding line of code is : vpMatrix L = sd.interaction(); + * + * // Compute the error vector (s-sd) for the point feature + * s.error(s_star); + * } + * \endcode + * + * An other fully explained example is given in the \ref tutorial-ibvs. + */ class VISP_EXPORT vpFeaturePoint : public vpBasicFeature { private: @@ -184,19 +180,17 @@ class VISP_EXPORT vpFeaturePoint : public vpBasicFeature public: vpFeaturePoint(); - //! Destructor. 
- virtual ~vpFeaturePoint() { } void buildFrom(double x, double y, double Z); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; - vpFeaturePoint *duplicate() const; + vpFeaturePoint *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; double get_x() const; @@ -204,10 +198,10 @@ class VISP_EXPORT vpFeaturePoint : public vpBasicFeature double get_Z() const; - void init(); - vpMatrix interaction(unsigned int select = FEATURE_ALL); + void init() override; + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void set_x(double x); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeaturePoint3D.h b/modules/visual_features/include/visp3/visual_features/vpFeaturePoint3D.h index b04397b6f8..52908f7da3 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeaturePoint3D.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeaturePoint3D.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 3D point visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeaturePoint3d_H #define vpFeaturePoint3d_H /*! 
- \file vpFeaturePoint3D.h - \brief class that defines the 3D point visual feature. -*/ + * \file vpFeaturePoint3D.h + * \brief class that defines the 3D point visual feature. + */ #include #include @@ -49,171 +47,166 @@ #include /*! - \class vpFeaturePoint3D - \ingroup group_visual_features - \brief Class that defines the 3D point visual feature. - - A 3D point visual feature corresponds to a 3D point with \f$ - {\bf X} = (X,Y,Z)\f$ coordinates in the camera frame. - - This class is intended to manipulate the 3D point visual feature - \f$ s = (X,Y,Z) \f$. The interaction matrix related to \f$ s \f$ is given -by: \f[ L = \left[ \begin{array}{rrrrrr} - -1 & 0 & 0 & 0 & -Z & Y \\ - 0 & -1 & 0 & Z & 0 & -X \\ - 0 & 0 & -1 & -Y & X & 0 \\ - \end{array} - \right] - \f] - - Two ways are allowed to initialize the feature. - - - The first way by setting the feature values \f$(X,Y,Z)\f$ using - vpFeaturePoint3D member functions like set_X(), set_Y(), set_Z(), - or also buildFrom(). - - - The second by using the feature builder functionalities to - initialize the feature from a point structure like - vpFeatureBuilder::create (vpFeaturePoint3D &, const vpPoint &). - - The interaction() method allows to compute the interaction matrix - \f$ L\f$ associated to the 3D point visual feature, while the - error() method computes the error vector \f$ (s - s^*)\f$ between the - current visual feature and the desired one. - - The code below shows how to create a eye-in hand visual servoing - task using a 3D point feature \f$(X,Y,Z)\f$ that correspond to the - 3D point coordinates in the camera frame. To control six degrees of - freedom, at least three other features must be considered like - vpFeatureThetaU visual features. First we create a current (\f$s\f$) - and desired (\f$s^*\f$) 3D point feature, set the task to use the - interaction matrix associated to the desired feature \f$L_{s^*}\f$ - and than compute the camera velocity \f$v=-\lambda \; {L_{s^*}}^+ \; - (s-s^*)\f$. 
The current feature \f$s\f$ is updated in the while() - loop while \f$s^*\f$ is set to \f$Z^*=1\f$. - - \code -#include -#include -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - // Set the 3D point coordinates in the object frame: oP - vpPoint point(0.1, -0.1, 0); - - vpHomogeneousMatrix cMo; // Pose between the camera and the object frame - cMo.buildFrom(0, 0, 1.2, 0, 0, 0); - // ... cMo need here to be computed from a pose estimation - - point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP - - // Creation of the current feature s - vpFeaturePoint3D s; - s.buildFrom(point); // Initialize the feature from the 3D point coordinates in the camera frame: s=(X,Y,Z) - s.print(); - - // Creation of the desired feature s*. - vpFeaturePoint3D s_star; - s_star.buildFrom(0, 0, 1); // Z*=1 meter - s_star.print(); - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the desired visual features s* - task.setInteractionMatrixType(vpServo::DESIRED); - // Set the constant gain - double lambda = 0.8; - task.setLambda(lambda); - - // Add the 3D point feature to the task - task.addFeature(s, s_star); - - // Control loop - for ( ; ; ) { - // ... cMo need here to be estimated from for example a pose estimation. - point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP - - // Update the current 3D point visual feature - s.buildFrom(point); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } -} - \endcode - - If you want to deal only with the \f$(X,Y)\f$ subset feature from the 3D - point feature, you have just to modify the addFeature() call in - the previous example by the following line. In that case, the dimension - of \f$s\f$ is two. 
- - \code - // Add the (X,Y) subset feature from the 3D point visual feature to the task - task.addFeature(s, s_star, vpFeaturePoint3D::selectX() | vpFeaturePoint3D::selectY()); - \endcode - - If you want to build your own control law, this other example shows - how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D - point visual feature, compute the corresponding error - vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. - - \code -#include -#include -#include -#include - -int main() -{ - // Set the 3D point coordinates in the object frame: oP - vpPoint point(0.1, -0.1, 0); - - vpHomogeneousMatrix cMo; // Pose between the camera and the object frame - cMo.buildFrom(0, 0, 1.2, 0, 0, 0); - // ... cMo need here to be computed from a pose estimation - - point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP - - // Creation of the current feature s - vpFeaturePoint3D s; - s.buildFrom(point); // Initialize the feature from the 3D point coordinates in the camera frame - s.print(); - - // Creation of the desired feature s*. - vpFeaturePoint3D s_star; - s_star.buildFrom(0, 0, 1); // Z*=1 meter - s_star.print(); - - // Compute the L_s interaction matrix associated to the current feature - vpMatrix L = s.interaction(); - std::cout << "L: " << L << std::endl; - - // Compute the error vector (s-s*) for the 3D point feature - vpColVector e = s.error(s_star); // e = (s-s*) - - std::cout << "e: " << e << std::endl; -} - \endcode - -*/ + * \class vpFeaturePoint3D + * \ingroup group_visual_features + * \brief Class that defines the 3D point visual feature. + * + * A 3D point visual feature corresponds to a 3D point with \f$ + * {\bf X} = (X,Y,Z)\f$ coordinates in the camera frame. + * + * This class is intended to manipulate the 3D point visual feature + * \f$ s = (X,Y,Z) \f$. 
The interaction matrix related to \f$ s \f$ is given + * by: \f[ L = \left[ \begin{array}{rrrrrr} + * -1 & 0 & 0 & 0 & -Z & Y \\ + * 0 & -1 & 0 & Z & 0 & -X \\ + * 0 & 0 & -1 & -Y & X & 0 \\ + * \end{array} + * \right] + * \f] + * + * Two ways are allowed to initialize the feature. + * + * - The first way by setting the feature values \f$(X,Y,Z)\f$ using + * vpFeaturePoint3D member functions like set_X(), set_Y(), set_Z(), + * or also buildFrom(). + * + * - The second by using the feature builder functionalities to + * initialize the feature from a point structure like + * vpFeatureBuilder::create (vpFeaturePoint3D &, const vpPoint &). + * + * The interaction() method allows to compute the interaction matrix + * \f$ L\f$ associated to the 3D point visual feature, while the + * error() method computes the error vector \f$ (s - s^*)\f$ between the + * current visual feature and the desired one. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 3D point feature \f$(X,Y,Z)\f$ that correspond to the + * 3D point coordinates in the camera frame. To control six degrees of + * freedom, at least three other features must be considered like + * vpFeatureThetaU visual features. First we create a current (\f$s\f$) + * and desired (\f$s^*\f$) 3D point feature, set the task to use the + * interaction matrix associated to the desired feature \f$L_{s^*}\f$ + * and than compute the camera velocity \f$v=-\lambda \; {L_{s^*}}^+ \; + * (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() + * loop while \f$s^*\f$ is set to \f$Z^*=1\f$. + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * // Set the 3D point coordinates in the object frame: oP + * vpPoint point(0.1, -0.1, 0); + * + * vpHomogeneousMatrix cMo; // Pose between the camera and the object frame + * cMo.buildFrom(0, 0, 1.2, 0, 0, 0); + * // ... 
cMo need here to be computed from a pose estimation + * + * point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP + * + * // Creation of the current feature s + * vpFeaturePoint3D s; + * s.buildFrom(point); // Initialize the feature from the 3D point coordinates in the camera frame: s=(X,Y,Z) + * s.print(); + * + * // Creation of the desired feature s*. + * vpFeaturePoint3D s_star; + * s_star.buildFrom(0, 0, 1); // Z*=1 meter + * s_star.print(); + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the desired visual features s* + * task.setInteractionMatrixType(vpServo::DESIRED); + * // Set the constant gain + * double lambda = 0.8; + * task.setLambda(lambda); + * + * // Add the 3D point feature to the task + * task.addFeature(s, s_star); + * + * // Control loop + * for ( ; ; ) { + * // ... cMo need here to be estimated from for example a pose estimation. + * point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP + * + * // Update the current 3D point visual feature + * s.buildFrom(point); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + * + * If you want to deal only with the \f$(X,Y)\f$ subset feature from the 3D + * point feature, you have just to modify the addFeature() call in + * the previous example by the following line. In that case, the dimension + * of \f$s\f$ is two. 
+ * + * \code + * // Add the (X,Y) subset feature from the 3D point visual feature to the task + * task.addFeature(s, s_star, vpFeaturePoint3D::selectX() | vpFeaturePoint3D::selectY()); + * \endcode + * + * If you want to build your own control law, this other example shows + * how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D + * point visual feature, compute the corresponding error + * vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * // Set the 3D point coordinates in the object frame: oP + * vpPoint point(0.1, -0.1, 0); + * + * vpHomogeneousMatrix cMo; // Pose between the camera and the object frame + * cMo.buildFrom(0, 0, 1.2, 0, 0, 0); + * // ... cMo need here to be computed from a pose estimation + * + * point.changeFrame(cMo); // Compute the 3D point coordinates in the camera frame cP = cMo * oP + * + * // Creation of the current feature s + * vpFeaturePoint3D s; + * s.buildFrom(point); // Initialize the feature from the 3D point coordinates in the camera frame + * s.print(); + * + * // Creation of the desired feature s*. + * vpFeaturePoint3D s_star; + * s_star.buildFrom(0, 0, 1); // Z*=1 meter + * s_star.print(); + * + * // Compute the L_s interaction matrix associated to the current feature + * vpMatrix L = s.interaction(); + * std::cout << "L: " << L << std::endl; + * + * // Compute the error vector (s-s*) for the 3D point feature + * vpColVector e = s.error(s_star); // e = (s-s*) + * + * std::cout << "e: " << e << std::endl; + * } + * \endcode + */ class VISP_EXPORT vpFeaturePoint3D : public vpBasicFeature - { - public: // basic constructor vpFeaturePoint3D(); - //! Destructor. Does nothing. 
- virtual ~vpFeaturePoint3D() {} /* - /section Set coordinates - */ + * Set coordinates + */ // build feature from a point (vpPoint) void buildFrom(const vpPoint &p); @@ -221,16 +214,16 @@ class VISP_EXPORT vpFeaturePoint3D : public vpBasicFeature void buildFrom(double X, double Y, double Z); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; // feature duplication - vpFeaturePoint3D *duplicate() const; + vpFeaturePoint3D *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; // get the point X-coordinates double get_X() const; @@ -240,12 +233,12 @@ class VISP_EXPORT vpFeaturePoint3D : public vpBasicFeature double get_Z() const; // basic construction - void init(); + void init() override; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; // print the name of the feature - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; // set the point X-coordinates void set_X(double X); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeaturePointPolar.h b/modules/visual_features/include/visp3/visual_features/vpFeaturePointPolar.h index 6e69fb9aa8..cee08b8a78 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeaturePointPolar.h +++ 
b/modules/visual_features/include/visp3/visual_features/vpFeaturePointPolar.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * 2D point with polar coordinates visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeaturePointPolar_H #define vpFeaturePointPolar_H /*! - \file vpFeaturePointPolar.h - \brief Class that defines a 2D point visual feature with polar coordinates. -*/ + * \file vpFeaturePointPolar.h + * \brief Class that defines a 2D point visual feature with polar coordinates. + */ #include #include @@ -49,209 +47,207 @@ #include /*! - \class vpFeaturePointPolar - \ingroup group_visual_features - - \brief Class that defines 2D image point visual feature with - polar coordinates \f$(\rho,\theta)\f$ described in \cite Corke09a. - - Let us denote \f$(\rho,\theta)\f$ the polar coordinates of an image - point, with \f$\rho\f$ the radius of the feature point with respect - to the optical center and \f$\theta\f$ the angle. From cartesian - coordinates \f$(x,y)\f$ of a image point, polar coordinates are - obtained by: - - \f[\rho = \sqrt{x^2+y^2} \hbox{,}\; \; \theta = \arctan \frac{y}{x}\f] - - From polar coordinates, cartesian coordinates of the feature point - can be obtained by: - - \f[x = \rho \cos\theta \hbox{,}\; \; y = \rho \sin\theta\f] - - This class is intended to manipulate the 2D image point visual - feature in polar coordinates \f$ s = (\rho, \theta) \f$. 
The - interaction matrix related to \f$ s \f$ is given by: - - \f[ - L = \left[ - \begin{array}{l} - L_{\rho} \\ - \; \\ - L_{\theta}\\ - \end{array} - \right] - = - \left[ - \begin{array}{cccccc} - \frac{-\cos \theta}{Z} & \frac{-\sin \theta}{Z} & \frac{\rho}{Z} & - (1+\rho^2)\sin\theta & -(1+\rho^2)\cos\theta & 0 \\ - \;\\ \ - \frac{\sin\theta}{\rho Z} & \frac{-\cos\theta}{\rho Z} & 0 & \cos\theta - /\rho & \sin\theta/\rho & -1 \\ \end{array} \right] \f] - - where \f$Z\f$ is the 3D depth of the considered point in the camera frame. - - Two ways are allowed to initialize the feature. - - - The first way by setting the feature values \f$(\rho,\theta,Z)\f$ - using vpFeaturePointPolar members like set_rho(), set_theta(), - set_Z(), or set_rhoThetaZ(), or also buildFrom(). - - - The second way by using the feature builder functionalities to - initialize the feature from a dot tracker, like - vpFeatureBuilder::create (vpFeaturePointPolar &, const - vpCameraParameters &, const vpDot &) or vpFeatureBuilder::create - (vpFeaturePointPolar &, const vpCameraParameters &, const vpDot2 - &). Be aware, that in that case only \f$(\rho,\theta)\f$ are - initialized. You may also initialize the 3D depth \f$Z\f$. It is - also possible to initialize the feature from a point structure, - like vpFeatureBuilder::create(vpFeaturePointPolar &, const vpPoint - &) or vpFeatureBuilder::create(vpFeaturePointPolar &, const - vpCameraParameters &, const vpCameraParameters &, const vpPoint - &). In that case all the feature parameters \f$(\rho,\theta,Z)\f$ - would be initialized. - - The interaction() method allows to compute the interaction matrix - \f$L\f$ associated to the visual feature, while the error() method - computes the error vector \f$(s - s^*)\f$ between the current visual - feature and the desired one. - - The code below shows how to create a eye-in hand visual servoing - task using four 2D point features with polar coordinates. 
First we - create four current features \f$s\f$ (p var name in the code) and - four desired \f$s^*\f$ (pd var name in the code) point features with - polar coordinates, set the task to use the interaction matrix - associated to the current feature \f$L_{s}\f$ and than compute the - camera velocity \f$v=-\lambda \; {L_{s}}^+ \; (s-s^*)\f$. The - current feature \f$s\f$ is updated in the while() loop, while - \f$s^*\f$ is initialized at the beginning. - - \code - #include - #include - #include - #include - - int main() - { - - // Create 4 points to specify the object of interest - vpPoint point[4]; - - // Set the 3D point coordinates in the object frame: oP - point[0].setWorldCoordinates(-0.1, -0.1, 0); - point[1].setWorldCoordinates( 0.1, -0.1, 0); - point[2].setWorldCoordinates( 0.1, 0.1, 0); - point[3].setWorldCoordinates(-0.1, 0.1, 0); - - // Initialize the desired pose between the camera and the object frame - vpHomogeneousMatrix cMod; - cMod.buildFrom(0, 0, 1, 0, 0, 0); - - // Compute the desired position of the point - for (int i = 0 ; i < 4 ; i++) { - // Compute the 3D point coordinates in the camera frame cP = cMod * oP - point[i].changeFrame(cMod); - // Compute the perspective projection to set (x,y) - point[i].projection(); - } - - // Create 4 desired visual features as 2D points with polar coordinates - vpFeaturePointPolar pd[4]; - // Initialize the desired visual feature from the desired point positions - for (int i = 0 ; i < 4 ; i++) - vpFeatureBuilder::create(pd[i], point[i]); - - // Initialize the current pose between the camera and the object frame - vpHomogeneousMatrix cMo; - cMo.buildFrom(0, 0, 1.2, 0, 0, M_PI); - // ... 
cMo need here to be computed from a pose estimation - - for (int i = 0 ; i < 4 ; i++) { - // Compute the 3D point coordinates in the camera frame cP = cMo * oP - point[i].changeFrame(cMo); - // Compute the perspective projection to set (x,y) - point[i].projection(); - } - // Create 4 current visual features as 2D points with polar coordinates - vpFeaturePointPolar p[4]; - // Initialize the current visual feature from the current point positions - for (int i = 0 ; i < 4 ; i++) - vpFeatureBuilder::create(p[i], point[i]); - - // Visual servo task initialization - vpServo task; - // - Camera is mounted on the robot end-effector and velocities are - // computed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // - Interaction matrix is computed with the current visual features s - task.setInteractionMatrixType(vpServo::CURRENT); - // - Set the constant gain to 1 - task.setLambda(1); - // - Add current and desired features - for (int i = 0 ; i < 4 ; i++) - task.addFeature(p[i], pd[i]); - - // Control loop - for ( ; ; ) { - // ... cMo need here to be estimated from for example a pose estimation. - // Computes the point coordinates in the camera frame and its 2D - // coordinates in the image plane - for (int i = 0 ; i < 4 ; i++) - point[i].track(cMo) ; - - // Update the current 2D point visual feature with polar coordinates - for (int i = 0 ; i < 4 ; i++) - vpFeatureBuilder::create(p[i], point[i]); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - } - \endcode - - If you want to deal only with the \f$\rho\f$ subset feature from the 2D - point feature set, you have just to modify the addFeature() call in the - previous example by the following line. In that case, the dimension of - \f$s\f$ is four. 
- - \code - // Add the rho subset feature from the 2D point polar coordinates visual features - task.addFeature(p[i], pd[i], vpFeaturePointPolar::selectRho()); - \endcode - - If you want to build your own control law, this other example shows how - to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual - feature with polar coordinates, compute the corresponding error vector - \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. - - \code - #include - #include - - int main() - { - // Creation of the current feature s - vpFeaturePointPolar s; - // Initialize the current feature - s.buildFrom(0.1, M_PI, 1); // rho=0.1m, theta=pi, Z=1m - - // Creation of the desired feature s - vpFeaturePointPolar s_star; - // Initialize the desired feature - s.buildFrom(0.15, 0, 0.8); // rho=0.15m, theta=0, Z=0.8m - - // Compute the interaction matrix L_s for the current feature - vpMatrix L = s.interaction(); - - // Compute the error vector (s-s*) for the point feature with polar coordinates - s.error(s_star); - - return 0; - } - \endcode - -*/ + * \class vpFeaturePointPolar + * \ingroup group_visual_features + * + * \brief Class that defines 2D image point visual feature with + * polar coordinates \f$(\rho,\theta)\f$ described in \cite Corke09a. + * + * Let us denote \f$(\rho,\theta)\f$ the polar coordinates of an image + * point, with \f$\rho\f$ the radius of the feature point with respect + * to the optical center and \f$\theta\f$ the angle. From cartesian + * coordinates \f$(x,y)\f$ of a image point, polar coordinates are + * obtained by: + * + * \f[\rho = \sqrt{x^2+y^2} \hbox{,}\; \; \theta = \arctan \frac{y}{x}\f] + * + * From polar coordinates, cartesian coordinates of the feature point + * can be obtained by: + * + * \f[x = \rho \cos\theta \hbox{,}\; \; y = \rho \sin\theta\f] + * + * This class is intended to manipulate the 2D image point visual + * feature in polar coordinates \f$ s = (\rho, \theta) \f$. 
The + * interaction matrix related to \f$ s \f$ is given by: + * + * \f[ + * L = \left[ + * \begin{array}{l} + * L_{\rho} \\ + * \; \\ + * L_{\theta}\\ + * \end{array} + * \right] + * = + * \left[ + * \begin{array}{cccccc} + * \frac{-\cos \theta}{Z} & \frac{-\sin \theta}{Z} & \frac{\rho}{Z} & + * (1+\rho^2)\sin\theta& -(1+\rho^2)\cos\theta & 0 \\ + * \;\\ \ + * \frac{\sin\theta}{\rho Z} & \frac{-\cos\theta}{\rho Z} & 0 & \cos\theta + * /\rho & \sin\theta/\rho & -1 \\ \end{array} \right] \f] + * + * where \f$Z\f$ is the 3D depth of the considered point in the camera frame. + * + * Two ways are allowed to initialize the feature. + * + * - The first way by setting the feature values \f$(\rho,\theta,Z)\f$ + * using vpFeaturePointPolar members like set_rho(), set_theta(), + * set_Z(), or set_rhoThetaZ(), or also buildFrom(). + * + * - The second way by using the feature builder functionalities to + * initialize the feature from a dot tracker, like + * vpFeatureBuilder::create (vpFeaturePointPolar &, const + * vpCameraParameters &, const vpDot &) or vpFeatureBuilder::create + * (vpFeaturePointPolar &, const vpCameraParameters &, const vpDot2 + * &). Be aware, that in that case only \f$(\rho,\theta)\f$ are + * initialized. You may also initialize the 3D depth \f$Z\f$. It is + * also possible to initialize the feature from a point structure, + * like vpFeatureBuilder::create(vpFeaturePointPolar &, const vpPoint + * &) or vpFeatureBuilder::create(vpFeaturePointPolar &, const + * vpCameraParameters &, const vpCameraParameters &, const vpPoint + * &). In that case all the feature parameters \f$(\rho,\theta,Z)\f$ + * would be initialized. + * + * The interaction() method allows to compute the interaction matrix + * \f$L\f$ associated to the visual feature, while the error() method + * computes the error vector \f$(s - s^*)\f$ between the current visual + * feature and the desired one. 
+ * + * The code below shows how to create a eye-in hand visual servoing + * task using four 2D point features with polar coordinates. First we + * create four current features \f$s\f$ (p var name in the code) and + * four desired \f$s^*\f$ (pd var name in the code) point features with + * polar coordinates, set the task to use the interaction matrix + * associated to the current feature \f$L_{s}\f$ and than compute the + * camera velocity \f$v=-\lambda \; {L_{s}}^+ \; (s-s^*)\f$. The + * current feature \f$s\f$ is updated in the while() loop, while + * \f$s^*\f$ is initialized at the beginning. + * + * \code + * #include + * #include + * #include + * #include + * + * int main() + * { + * // Create 4 points to specify the object of interest + * vpPoint point[4]; + * + * // Set the 3D point coordinates in the object frame: oP + * point[0].setWorldCoordinates(-0.1, -0.1, 0); + * point[1].setWorldCoordinates( 0.1, -0.1, 0); + * point[2].setWorldCoordinates( 0.1, 0.1, 0); + * point[3].setWorldCoordinates(-0.1, 0.1, 0); + * + * // Initialize the desired pose between the camera and the object frame + * vpHomogeneousMatrix cMod; + * cMod.buildFrom(0, 0, 1, 0, 0, 0); + * + * // Compute the desired position of the point + * for (int i = 0 ; i < 4 ; i++) { + * // Compute the 3D point coordinates in the camera frame cP = cMod * oP + * point[i].changeFrame(cMod); + * // Compute the perspective projection to set (x,y) + * point[i].projection(); + * } + * + * // Create 4 desired visual features as 2D points with polar coordinates + * vpFeaturePointPolar pd[4]; + * // Initialize the desired visual feature from the desired point positions + * for (int i = 0 ; i < 4 ; i++) + * vpFeatureBuilder::create(pd[i], point[i]); + * + * // Initialize the current pose between the camera and the object frame + * vpHomogeneousMatrix cMo; + * cMo.buildFrom(0, 0, 1.2, 0, 0, M_PI); + * // ... 
cMo need here to be computed from a pose estimation + * + * for (int i = 0 ; i < 4 ; i++) { + * // Compute the 3D point coordinates in the camera frame cP = cMo * oP + * point[i].changeFrame(cMo); + * // Compute the perspective projection to set (x,y) + * point[i].projection(); + * } + * // Create 4 current visual features as 2D points with polar coordinates + * vpFeaturePointPolar p[4]; + * // Initialize the current visual feature from the current point positions + * for (int i = 0 ; i < 4 ; i++) + * vpFeatureBuilder::create(p[i], point[i]); + * + * // Visual servo task initialization + * vpServo task; + * // - Camera is mounted on the robot end-effector and velocities are + * // computed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // - Interaction matrix is computed with the current visual features s + * task.setInteractionMatrixType(vpServo::CURRENT); + * // - Set the constant gain to 1 + * task.setLambda(1); + * // - Add current and desired features + * for (int i = 0 ; i < 4 ; i++) + * task.addFeature(p[i], pd[i]); + * + * // Control loop + * for ( ; ; ) { + * // ... cMo need here to be estimated from for example a pose estimation. + * // Computes the point coordinates in the camera frame and its 2D + * // coordinates in the image plane + * for (int i = 0 ; i < 4 ; i++) + * point[i].track(cMo) ; + + * // Update the current 2D point visual feature with polar coordinates + * for (int i = 0 ; i < 4 ; i++) + * vpFeatureBuilder::create(p[i], point[i]); + + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + * + * If you want to deal only with the \f$\rho\f$ subset feature from the 2D + * point feature set, you have just to modify the addFeature() call in the + * previous example by the following line. In that case, the dimension of + * \f$s\f$ is four. 
+ * + * \code + * // Add the rho subset feature from the 2D point polar coordinates visual features + * task.addFeature(p[i], pd[i], vpFeaturePointPolar::selectRho()); + * \endcode + * + * If you want to build your own control law, this other example shows how + * to create a current (\f$s\f$) and desired (\f$s^*\f$) 2D point visual + * feature with polar coordinates, compute the corresponding error vector + * \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * + * int main() + * { + * // Creation of the current feature s + * vpFeaturePointPolar s; + * // Initialize the current feature + * s.buildFrom(0.1, M_PI, 1); // rho=0.1m, theta=pi, Z=1m + * + * // Creation of the desired feature s + * vpFeaturePointPolar s_star; + * // Initialize the desired feature + * s.buildFrom(0.15, 0, 0.8); // rho=0.15m, theta=0, Z=0.8m + * + * // Compute the interaction matrix L_s for the current feature + * vpMatrix L = s.interaction(); + * + * // Compute the error vector (s-s*) for the point feature with polar coordinates + * s.error(s_star); + * + * return 0; + * } + * \endcode + */ class VISP_EXPORT vpFeaturePointPolar : public vpBasicFeature { private: @@ -262,25 +258,23 @@ class VISP_EXPORT vpFeaturePointPolar : public vpBasicFeature public: // basic constructor vpFeaturePointPolar(); - //! Destructor. Does nothing. 
- virtual ~vpFeaturePointPolar() { } void buildFrom(double rho, double theta, double Z); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; // feature duplication - vpFeaturePointPolar *duplicate() const; + vpFeaturePointPolar *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; // basic construction - void init(); + void init() override; // get the point rho-coordinates double get_rho() const; @@ -290,10 +284,10 @@ class VISP_EXPORT vpFeaturePointPolar : public vpBasicFeature double get_Z() const; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; // print the name of the feature - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; // set the point rho-coordinates void set_rho(double rho); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureSegment.h b/modules/visual_features/include/visp3/visual_features/vpFeatureSegment.h index 09a1312220..fa074df8eb 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureSegment.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureSegment.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. 
* Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,19 +29,15 @@ * * Description: * Segment visual feature. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #ifndef vpFeatureSegment_H #define vpFeatureSegment_H /*! - \file vpFeatureSegment.h - \brief class that defines the Segment visual feature -*/ + * \file vpFeatureSegment.h + * \brief class that defines the Segment visual feature + */ #include #include @@ -51,106 +46,100 @@ #include /*! - \class vpFeatureSegment - \ingroup group_visual_features - - \brief Class that defines a 2D segment visual features. - This class allow to consider two sets of visual features: - - the non normalised features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ where - \f$(x_c,y_c)\f$ are the coordinates of the segment center, \f$ l \f$ the - segment length and \f$ \alpha \f$ the orientation of the segment with - respect to the \f$ x \f$ axis. - - or the normalized features \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$ with - \f$x_n = x_c/l\f$, \f$y_n = y_c/l\f$ and \f$l_n = 1/l\f$. - - - - The selection of the feature set is done either during construction using - vpFeatureSegment(bool), or by setNormalized(bool). - -*/ + * \class vpFeatureSegment + * \ingroup group_visual_features + * + * \brief Class that defines a 2D segment visual features. + * This class allow to consider two sets of visual features: + * - the non normalized features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ where + * \f$(x_c,y_c)\f$ are the coordinates of the segment center, \f$ l \f$ the + * segment length and \f$ \alpha \f$ the orientation of the segment with + * respect to the \f$ x \f$ axis. + * - or the normalized features \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$ with + * \f$x_n = x_c/l\f$, \f$y_n = y_c/l\f$ and \f$l_n = 1/l\f$. + * + * The selection of the feature set is done either during construction using + * vpFeatureSegment(bool), or by setNormalized(bool). 
+ */ class VISP_EXPORT vpFeatureSegment : public vpBasicFeature { public: // empty constructor explicit vpFeatureSegment(bool normalized = false); - //! Destructor. Does nothing. - virtual ~vpFeatureSegment() {} // change values of the segment void buildFrom(double x1, double y1, double Z1, double x2, double y2, double Z2); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; //! Feature duplication. - vpFeatureSegment *duplicate() const; + vpFeatureSegment *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; /*! - Get the x coordinate of the segment center in the image plane. - - \return If normalized features are used, return \f$ x_n = x_c / l \f$. - Otherwise return \f$ x_c \f$. + * Get the x coordinate of the segment center in the image plane. + * + * \return If normalized features are used, return \f$ x_n = x_c / l \f$. + * Otherwise return \f$ x_c \f$. */ inline double getXc() const { return s[0]; } /*! - Get the y coordinate of the segment center in the image plane. - - \return If normalized features are used, return \f$ y_n = y_c / l \f$. - Otherwise return \f$ y_c \f$. - */ + * Get the y coordinate of the segment center in the image plane. + * + * \return If normalized features are used, return \f$ y_n = y_c / l \f$. + * Otherwise return \f$ y_c \f$. + */ inline double getYc() const { return s[1]; } /*! - Get the length of the segment. 
- - \return If normalized features are used, return \f$ l_n = 1 / l \f$. - Otherwise return \f$ l \f$. - - */ + * Get the length of the segment. + * + * \return If normalized features are used, return \f$ l_n = 1 / l \f$. + * Otherwise return \f$ l \f$. + */ inline double getL() const { return s[2]; } /*! - Get the value of \f$ \alpha \f$ which represents the orientation of - the segment. - - \return The value of \f$ \alpha \f$. - */ + * Get the value of \f$ \alpha \f$ which represents the orientation of + * the segment. + * + * \return The value of \f$ \alpha \f$. + */ inline double getAlpha() const { return s[3]; } /*! - Get the value of \f$ Z_1 \f$ which represents the Z coordinate in the - camera frame of the 3D point that corresponds to the segment first point. - - \return The value of the depth \f$ Z_1 \f$. - */ + * Get the value of \f$ Z_1 \f$ which represents the Z coordinate in the + * camera frame of the 3D point that corresponds to the segment first point. + * + * \return The value of the depth \f$ Z_1 \f$. + */ inline double getZ1() const { return Z1_; } /*! - Get the value of \f$ Z_2 \f$ which represents the Z coordinate in the - camera frame of the 3D point that corresponds to the segment second - point. - - \return The value of the depth \f$ Z_2 \f$. - */ + * Get the value of \f$ Z_2 \f$ which represents the Z coordinate in the + * camera frame of the 3D point that corresponds to the segment second + * point. + * + * \return The value of the depth \f$ Z_2 \f$. + */ inline double getZ2() const { return Z2_; } // Basic construction. - void init(); + void init() override; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; /*! - Indicates if the normalized features are considered. 
- */ + * Indicates if the normalized features are considered. + */ bool isNormalized() { return normalized_; }; static unsigned int selectXc(); @@ -159,61 +148,61 @@ class VISP_EXPORT vpFeatureSegment : public vpBasicFeature static unsigned int selectAlpha(); /*! - Set the king of feature to consider. - \param normalized : If true, use normalized features \f${\bf s} = (x_n, - y_n, l_n, \alpha)\f$. If false, use non normalized features \f${\bf s} = - (x_c, y_c, l_c, \alpha)\f$. - */ + * Set the king of feature to consider. + * \param normalized : If true, use normalized features \f${\bf s} = (x_n, + * y_n, l_n, \alpha)\f$. If false, use non normalized features \f${\bf s} = + * (x_c, y_c, l_c, \alpha)\f$. + */ void setNormalized(bool normalized) { normalized_ = normalized; }; - /*! - Set the value of the x coordinate of the segment center - in the image plane. It is one parameter of the visual feature \f$ s \f$. - - \param val : Value to set, that is either equal to \f$ x_n = x_c/l \f$ - when normalized features are considered, or equal to \f$ x_c \f$ - otherwise. - */ + /*! + * Set the value of the x coordinate of the segment center + * in the image plane. It is one parameter of the visual feature \f$ s \f$. + * + * \param val : Value to set, that is either equal to \f$ x_n = x_c/l \f$ + * when normalized features are considered, or equal to \f$ x_c \f$ + * otherwise. + */ inline void setXc(double val) { s[0] = xc_ = val; flags[0] = true; } - /*! - Set the value of the y coordinate of the segment center - in the image plane. It is one parameter of the visual feature \f$ s \f$. - - \param val : Value to set, that is either equal to \f$ y_n = y_c/l \f$ - when normalized features are considered, or equal to \f$ y_c \f$ - otherwise. - */ + /*! + * Set the value of the y coordinate of the segment center + * in the image plane. It is one parameter of the visual feature \f$ s \f$. 
+ * + * \param val : Value to set, that is either equal to \f$ y_n = y_c/l \f$ + * when normalized features are considered, or equal to \f$ y_c \f$ + * otherwise. + */ inline void setYc(double val) { s[1] = yc_ = val; flags[1] = true; } - /*! - - Set the value of the segment length in the image plane. It is one - parameter of the visual feature \f$ s \f$. - \param val : Value to set, that is either equal to \f$l_n= 1/l \f$ when - normalized features are considered, or equal to \f$ l \f$ otherwise. - */ + /*! + * Set the value of the segment length in the image plane. It is one + * parameter of the visual feature \f$ s \f$. + * + * \param val : Value to set, that is either equal to \f$l_n= 1/l \f$ when + * normalized features are considered, or equal to \f$ l \f$ otherwise. + */ inline void setL(double val) { s[2] = l_ = val; flags[2] = true; } - /*! - - Set the value of \f$ \alpha \f$ which represents the orientation of the - segment in the image plane. It is one parameter of the visual feature \f$ - s \f$. - \param val : \f$ \alpha \f$ value to set. - */ + /*! + * Set the value of \f$ \alpha \f$ which represents the orientation of the + * segment in the image plane. It is one parameter of the visual feature \f$ + * s \f$. + * + * \param val : \f$ \alpha \f$ value to set. + */ inline void setAlpha(double val) { s[3] = alpha_ = val; @@ -223,17 +212,16 @@ class VISP_EXPORT vpFeatureSegment : public vpBasicFeature } /*! - - Set the value of \f$ Z_1 \f$ which represents the Z coordinate in the - camera frame of the 3D point that corresponds to the segment first point. - - This value is requested to compute the interaction matrix. - - \param val : \f$ Z_1 \f$ value to set. - - \exception vpFeatureException::badInitializationError : If Z1 is behind - the camera or equal to zero. - */ + * Set the value of \f$ Z_1 \f$ which represents the Z coordinate in the + * camera frame of the 3D point that corresponds to the segment first point. 
+ * + * This value is requested to compute the interaction matrix. + * + * \param val : \f$ Z_1 \f$ value to set. + * + * \exception vpFeatureException::badInitializationError : If Z1 is behind + * the camera or equal to zero. + */ inline void setZ1(double val) { Z1_ = val; @@ -256,17 +244,16 @@ class VISP_EXPORT vpFeatureSegment : public vpBasicFeature } /*! - - Set the value of \f$ Z_2 \f$ which represents the Z coordinate in the - camera frame of the 3D point that corresponds to the segment second point. - - This value is requested to compute the interaction matrix. - - \param val : \f$ Z_2 \f$ value to set. - - \exception vpFeatureException::badInitializationError : If Z2 is behind - the camera or equal to zero. - */ + * Set the value of \f$ Z_2 \f$ which represents the Z coordinate in the + * camera frame of the 3D point that corresponds to the segment second point. + * + * This value is requested to compute the interaction matrix. + * + * \param val : \f$ Z_2 \f$ value to set. + * + * \exception vpFeatureException::badInitializationError : If Z2 is behind + * the camera or equal to zero. + */ inline void setZ2(double val) { Z2_ = val; diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureThetaU.h b/modules/visual_features/include/visp3/visual_features/vpFeatureThetaU.h index 1d239979e3..35356cabbb 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureThetaU.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureThetaU.h @@ -49,178 +49,177 @@ #include /*! - \class vpFeatureThetaU - \ingroup group_visual_features - - \brief Class that defines a 3D visual feature \f$ s\f$ from a \f$ \theta - u \f$ axis/angle parametrization that represent the rotation between - to frames. - - Let us denote \f$ \theta u = (\theta u_x, \theta u_y, \theta u_z)\f$ . - - It is convenient to consider two coordinate frames: the current - camera frame \f$ {\cal{F}}_c \f$ and the desired camera frame \f$ - {\cal{F}}_{c^*} \f$. 
- - Let \f$^{c^*}R_c \f$ be the rotation matrix that gives the - orientation of the current camera frame relative to the desired camera - frame. Let \f$ \theta u_{^{c^*}R_c} \f$ to corresponding axis/angle - representation of this rotation. - - Furthermore, let \f$^{c}R_{c^*} \f$ the rotation matrix that gives the - orientation of the desired camera frame relative to the current - camera frame. Let \f$ \theta u_{^{c}R_{c^*}} \f$ to corresponding - axis/angle representation of this rotation. - - This class can be used to manipulate two kind of visual features: - - - \f$ s = \theta u_{^{c^*}R_c} \f$ if the orientation of current - camera frame relative to the desired frame has to be - considered. The desired visual feature \f$ s^* \f$ is equal to - zero. The corresponding error is than equal to \f$ e=(s-s^*) = - \theta u_{^{c^*}R_c} \f$. In this case, the interaction matrix - related to \f$ s \f$ is given by \f[ L = \left[ \begin{array}{cc} - 0_3 & L_{\theta u} \end{array} \right] \f] with \f[ - L_{\theta u} = I_3 + \frac{\theta}{2} \; [u]_\times + - \left(1 - \frac{sinc \theta}{sinc^2 \frac{\theta}{2}}\right) - [u]^2_\times \f] where \f$ 0_3 \f$ is a \f$ 3 \times 3 \f$ nul - matrix, \f$ I_3 \f$ is the \f$3 \times 3\f$ identity matrix, and - for more readability \f$ \theta \f$ and \f$ u \f$ respectively the - angle and the axis coordinates of the \f$ \theta u_{^{c^*}R_c} \f$ - representation. - - - \f$ s = \theta u_{^{c}R_{c^*}} \f$ if it is more the orientation - of the desired camera frame relative to the current frame that has - to be considered. The desired visual feature \f$ s^* \f$ is equal - to zero. The corresponding error is than equal to \f$e=(s-s^*) = - \theta u_{^{c}R_{c^*}} \f$. 
In this case, the interaction matrix - related to \f$ s \f$ is given by \f[ L = \left[ \begin{array}{cc} - 0_3 & L_{\theta u} \end{array} \right] \f] with \f[ - L_{\theta u} = -I_3 + \frac{\theta}{2} \; [u]_\times - - \left(1 - \frac{sinc \theta}{sinc^2 \frac{\theta}{2}}\right) - [u]^2_\times \f] where \f$ 0_3 \f$ is a \f$ 3 \times 3 \f$ nul - matrix, \f$ I_3 \f$ is the \f$3 \times 3\f$ identity matrix, and - for more readability \f$ \theta \f$ and \f$ u \f$ respectively the - angle and the axis coordinates of the \f$ \theta u_{^{c}R_{c^*}} - \f$ representation. - - The kind of visual feature is to set during the construction of the - vpFeatureThetaU() object by using the selector - vpFeatureThetaU::vpFeatureThetaURotationRepresentationType. - - To initialize the feature \f$(\theta u_x, \theta u_y, \theta u_z)\f$ - you may use vpFeatureThetaU member functions like set_TUx(), - set_TUy(), set_TUz(), or also buildFrom() functions. - - Depending on the choice of the visual feature representation, the - interaction() method allows to compute the interaction matrix \f$ - L \f$ associated to the visual feature, while the error() - method computes the error vector \f$(s - s^*)\f$ between the current - visual feature and the desired one. - - To know more on the \f$ \theta u \f$ axis/angle representation for a - 3D rotation see the vpThetaUVector class. - - The code below shows how to create a eye-in hand visual servoing - task using a 3D \f$\theta u\f$ feature \f$(\theta u_x,\theta u_y, - \theta u_z)\f$ that correspond to the 3D rotation between the - current camera frame and the desired camera frame. To control six - degrees of freedom, at least three other features must be considered - like vpFeatureTranslation visual features. First we create a current - (\f$s\f$) 3D \f$\theta u\f$ feature, than set the - task to use the interaction matrix associated to the current feature - \f$L_s\f$ and than compute the camera velocity \f$v=-\lambda \; - L_s^+ \; (s-s^*)\f$. 
The current feature \f$s\f$ is updated in the - while() loop while \f$s^*\f$ is considered as zero. - - \code -#include -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpHomogeneousMatrix cMcd; - // ... cMcd need here to be initialized from for example a pose estimation. - - // Creation of the current feature s that correspond to the rotation - // in angle/axis parametrization between the current camera frame - // and the desired camera frame - vpFeatureThetaU s(vpFeatureThetaU::cRcd); - s.buildFrom(cMcd); // Initialization of the feature - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features s - task.setInteractionMatrixType(vpServo::CURRENT); - - // Add the 3D ThetaU feature to the task - task.addFeature(s); // s* is here considered as zero - - // Control loop - for ( ; ; ) { - // ... cMcd need here to be initialized from for example a pose estimation. - - // Update the current ThetaU visual feature - s.buildFrom(cMcd); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } -} - \endcode - - If you want to deal only with the \f$(\theta u_x,\theta u_y)\f$ subset - feature from the 3D \f$\theta u\f$ , you have just to modify the - addFeature() call in the previous example by the following line. In - that case, the dimension of \f$s\f$ is two. - - \code - // Add the (ThetaU_x, ThetaU_y) subset features from the 3D ThetaU - // rotation to the task - task.addFeature(s, vpFeatureThetaU::selectTUx() | vpFeatureThetaU::selectTUy()); - \endcode - - If you want to build your own control law, this other example shows - how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D - \f$\theta u\f$ visual feature, compute the corresponding error - vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. 
- - \code -#include -#include -#include - -int main() -{ - vpHomogeneousMatrix cdMc; - // ... cdMc need here to be initialized from for example a pose estimation. - - // Creation of the current feature s - vpFeatureThetaU s(vpFeatureThetaU::cdRc); - s.buildFrom(cdMc); // Initialization of the feature - - // Creation of the desired feature s*. By default this feature is - // initialized to zero - vpFeatureThetaU s_star(vpFeatureThetaU::cdRc); - - // Compute the interaction matrix L_s for the current ThetaU feature - vpMatrix L = s.interaction(); - - // Compute the error vector (s-s*) for the ThetaU feature - s.error(s_star); -} - \endcode - - -*/ + * \class vpFeatureThetaU + * \ingroup group_visual_features + * + * \brief Class that defines a 3D visual feature \f$ s\f$ from a \f$ \theta + * u \f$ axis/angle parametrization that represent the rotation between + * to frames. + * + * Let us denote \f$ \theta u = (\theta u_x, \theta u_y, \theta u_z)\f$ . + * + * It is convenient to consider two coordinate frames: the current + * camera frame \f$ {\cal{F}}_c \f$ and the desired camera frame \f$ + * {\cal{F}}_{c^*} \f$. + * + * Let \f$^{c^*}R_c \f$ be the rotation matrix that gives the + * orientation of the current camera frame relative to the desired camera + * frame. Let \f$ \theta u_{^{c^*}R_c} \f$ to corresponding axis/angle + * representation of this rotation. + * + * Furthermore, let \f$^{c}R_{c^*} \f$ the rotation matrix that gives the + * orientation of the desired camera frame relative to the current + * camera frame. Let \f$ \theta u_{^{c}R_{c^*}} \f$ to corresponding + * axis/angle representation of this rotation. + * + * This class can be used to manipulate two kind of visual features: + * + * - \f$ s = \theta u_{^{c^*}R_c} \f$ if the orientation of current + * camera frame relative to the desired frame has to be + * considered. The desired visual feature \f$ s^* \f$ is equal to + * zero. 
The corresponding error is then equal to \f$ e=(s-s^*) = + * \theta u_{^{c^*}R_c} \f$. In this case, the interaction matrix + * related to \f$ s \f$ is given by \f[ L = \left[ \begin{array}{cc} + * 0_3 & L_{\theta u} \end{array} \right] \f] with \f[ + * L_{\theta u} = I_3 + \frac{\theta}{2} \; [u]_\times + + * \left(1 - \frac{sinc \theta}{sinc^2 \frac{\theta}{2}}\right) + * [u]^2_\times \f] where \f$ 0_3 \f$ is a \f$ 3 \times 3 \f$ null + * matrix, \f$ I_3 \f$ is the \f$3 \times 3\f$ identity matrix, and + * for more readability \f$ \theta \f$ and \f$ u \f$ respectively the + * angle and the axis coordinates of the \f$ \theta u_{^{c^*}R_c} \f$ + * representation. + * + * - \f$ s = \theta u_{^{c}R_{c^*}} \f$ if it is more the orientation + * of the desired camera frame relative to the current frame that has + * to be considered. The desired visual feature \f$ s^* \f$ is equal + * to zero. The corresponding error is then equal to \f$e=(s-s^*) = + * \theta u_{^{c}R_{c^*}} \f$. In this case, the interaction matrix + * related to \f$ s \f$ is given by \f[ L = \left[ \begin{array}{cc} + * 0_3 & L_{\theta u} \end{array} \right] \f] with \f[ + * L_{\theta u} = -I_3 + \frac{\theta}{2} \; [u]_\times + * - \left(1 - \frac{sinc \theta}{sinc^2 \frac{\theta}{2}}\right) + * [u]^2_\times \f] where \f$ 0_3 \f$ is a \f$ 3 \times 3 \f$ null + * matrix, \f$ I_3 \f$ is the \f$3 \times 3\f$ identity matrix, and + * for more readability \f$ \theta \f$ and \f$ u \f$ respectively the + * angle and the axis coordinates of the \f$ \theta u_{^{c}R_{c^*}} + * \f$ representation. + * + * The kind of visual feature is set during the construction of the + * vpFeatureThetaU() object by using the selector + * vpFeatureThetaU::vpFeatureThetaURotationRepresentationType. + * + * To initialize the feature \f$(\theta u_x, \theta u_y, \theta u_z)\f$ + * you may use vpFeatureThetaU member functions like set_TUx(), + * set_TUy(), set_TUz(), or also buildFrom() functions.
+ * + * Depending on the choice of the visual feature representation, the + * interaction() method allows to compute the interaction matrix \f$ + * L \f$ associated to the visual feature, while the error() + * method computes the error vector \f$(s - s^*)\f$ between the current + * visual feature and the desired one. + * + * To know more on the \f$ \theta u \f$ axis/angle representation for a + * 3D rotation see the vpThetaUVector class. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 3D \f$\theta u\f$ feature \f$(\theta u_x,\theta u_y, + * \theta u_z)\f$ that correspond to the 3D rotation between the + * current camera frame and the desired camera frame. To control six + * degrees of freedom, at least three other features must be considered + * like vpFeatureTranslation visual features. First we create a current + * (\f$s\f$) 3D \f$\theta u\f$ feature, than set the + * task to use the interaction matrix associated to the current feature + * \f$L_s\f$ and than compute the camera velocity \f$v=-\lambda \; + * L_s^+ \; (s-s^*)\f$. The current feature \f$s\f$ is updated in the + * while() loop while \f$s^*\f$ is considered as zero. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpHomogeneousMatrix cMcd; + * // ... cMcd need here to be initialized from for example a pose estimation. + * + * // Creation of the current feature s that correspond to the rotation + * // in angle/axis parametrization between the current camera frame + * // and the desired camera frame + * vpFeatureThetaU s(vpFeatureThetaU::cRcd); + * s.buildFrom(cMcd); // Initialization of the feature + * + * // Set eye-in-hand control law. 
+ * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features s + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * // Add the 3D ThetaU feature to the task + * task.addFeature(s); // s* is here considered as zero + * + * // Control loop + * for ( ; ; ) { + * // ... cMcd need here to be initialized from for example a pose estimation. + * + * // Update the current ThetaU visual feature + * s.buildFrom(cMcd); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + * + * If you want to deal only with the \f$(\theta u_x,\theta u_y)\f$ subset + * feature from the 3D \f$\theta u\f$ , you have just to modify the + * addFeature() call in the previous example by the following line. In + * that case, the dimension of \f$s\f$ is two. + * + * \code + * // Add the (ThetaU_x, ThetaU_y) subset features from the 3D ThetaU + * // rotation to the task + * task.addFeature(s, vpFeatureThetaU::selectTUx() | vpFeatureThetaU::selectTUy()); + * \endcode + * + * If you want to build your own control law, this other example shows + * how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D + * \f$\theta u\f$ visual feature, compute the corresponding error + * vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpHomogeneousMatrix cdMc; + * // ... cdMc need here to be initialized from for example a pose estimation. + * + * // Creation of the current feature s + * vpFeatureThetaU s(vpFeatureThetaU::cdRc); + * s.buildFrom(cdMc); // Initialization of the feature + * + * // Creation of the desired feature s*. 
By default this feature is + * // initialized to zero + * vpFeatureThetaU s_star(vpFeatureThetaU::cdRc); + * + * // Compute the interaction matrix L_s for the current ThetaU feature + * vpMatrix L = s.interaction(); + * + * // Compute the error vector (s-s*) for the ThetaU feature + * s.error(s_star); + * } + * \endcode + */ class VISP_EXPORT vpFeatureThetaU : public vpBasicFeature { public: - typedef enum { + typedef enum + { TUx = 1, /*!< Select the subset \f$ \theta u_x \f$ visual feature from the \f$ \theta u\f$ angle/axis representation. */ TUy = 2, /*!< Select the subset \f$ \theta u_y \f$ visual feature @@ -228,7 +227,8 @@ class VISP_EXPORT vpFeatureThetaU : public vpBasicFeature TUz = 4 /*!< Select the subset \f$ \theta u_z \f$ visual feature from the \f$ \theta u\f$ angle/axis representation. */ } vpFeatureThetaUType; - typedef enum { + typedef enum + { cdRc, /*!< Selector used to manipulate the visual feature \f$ s = \theta u_{^{c^*}R_c} \f$. This visual feature represent the orientation of the current camera frame @@ -250,26 +250,24 @@ class VISP_EXPORT vpFeatureThetaU : public vpBasicFeature vpFeatureThetaU(vpThetaUVector &tu, vpFeatureThetaURotationRepresentationType r); vpFeatureThetaU(vpRotationMatrix &R, vpFeatureThetaURotationRepresentationType r); vpFeatureThetaU(vpHomogeneousMatrix &M, vpFeatureThetaURotationRepresentationType r); - //! Destructor. Does nothing. 
- virtual ~vpFeatureThetaU() {} void buildFrom(vpThetaUVector &tu); // build from a rotation matrix void buildFrom(const vpRotationMatrix &R); - // build from an homogeneous matrix + // build from an homogeneous matrix void buildFrom(const vpHomogeneousMatrix &M); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; //! Feature duplication. - vpFeatureThetaU *duplicate() const; + vpFeatureThetaU *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; vpFeatureThetaURotationRepresentationType getFeatureThetaURotationType() const; @@ -278,11 +276,11 @@ class VISP_EXPORT vpFeatureThetaU : public vpBasicFeature double get_TUz() const; // Basic construction. 
- void init(); + void init() override; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void set_TUx(double tu_x); void set_TUy(double tu_y); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureTranslation.h b/modules/visual_features/include/visp3/visual_features/vpFeatureTranslation.h index d8d6ce9134..91ea77ca9b 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureTranslation.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureTranslation.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * 3D translation visual feature. - * -*****************************************************************************/ + */ #ifndef vpFeatureTranslation_H #define vpFeatureTranslation_H @@ -48,238 +46,236 @@ #include /*! - \class vpFeatureTranslation - \ingroup group_visual_features - - \brief Class that defines the translation visual feature - \f$s=(t_x,t_y,t_z)\f$. - - It is convenient to consider two coordinate frames noted here \f$ -{\cal{F}}_1 \f$ and \f$ - {\cal{F}}_{2} \f$. - - Let \f$^{{\cal{F}}_2}M_{{\cal{F}}_1} \f$ be the homogeneous matrix that -gives the orientation and the translation of the frame \f$ {\cal{F}}_1 \f$ -with respect to the frame \f$ {\cal{F}}_2 \f$. 
- - \f[ - ^{{\cal{F}}_2}M_{{\cal{F}}_1} = \left(\begin{array}{cc} - ^{{\cal{F}}_2}R_{{\cal{F}}_1} & ^{{\cal{F}}_2}t_{{\cal{F}}_1} \\ - {\bf 0}_{1\times 3} & 1 - \end{array} - \right) - \f] - - with \f$^{{\cal{F}}_2}R_{{\cal{F}}_1} \f$ the rotation matrix that gives the -orientation of the frame \f$ {\cal{F}}_1 \f$ relative to the frame \f$ -{\cal{F}}_2 \f$ and \f$^{{\cal{F}}_2}t_{{\cal{F}}_1} \f$ the translation -vector that gives the position of the frame \f$ {\cal{F}}_1 \f$ relative to -the frame \f$ {\cal{F}}_2 \f$. To know more about homogeneous matrices see -vpHomogeneousMatrix documentation. - - This class can be used to manipulate three kind of visual features: - - - This class can be used to manipulate the translation visual feature - \f$s= ^{c^*}t_c\f$ which gives the position of - the current camera frame relative to the desired camera frame. It is -composed by the three components \f$(t_x,t_y,t_z)\f$. The desired visual -feature \f$ s^* \f$ is equal to zero. The corresponding error is than equal to -\f$ e=(s-s^*) = ^{c^*}t_c \f$. In this case, the interaction matrix related to -\f$ s \f$ is given by \f[ L = [ - ^{c^*}R_c \;\; 0_3] \f] - - - This class can also be used to manipulate the translation visual feature - \f$s= ^{c}t_{c^*}\f$ which gives the position of - the desired camera frame relative to the current camera frame. It is -composed by the three components \f$(t_x,t_y,t_z)\f$. The desired visual -feature \f$ s^* \f$ is equal to zero. The corresponding error is than equal to -\f$ e=(s-s^*) = ^{c}t_{c^*} \f$. In this case, the interaction matrix related -to \f$ s \f$ is given by \f[ L = [ -I_3 \;\; [^{c}t_{c^*}]_\times] \f] - - - Actually, this class can also be used to manipulate the - translation visual feature \f$s= ^{c}t_o\f$ which gives the position - of the object frame relative to the current camera frame. It is - composed by the three components \f$(t_x,t_y,t_z)\f$ too. 
The - desired visual feature \f$ s^* \f$ is the translation visual feature - \f$s^*= ^{c^*}t_o\f$ which gives the position of the object frame - relative to the desired camera frame. The corresponding error is - than equal to \f$ e=(s-s^*) = ^{c}t_o - ^{c^*}t_o \f$. In this case, - the interaction matrix related to \f$ s \f$ is given by \f[ L = [ - -I_3 \;\; [^{c}t_o]_\times] \f] - - To initialize the feature \f$(t_x, t_y, t_z)\f$ you may use member - functions like set_Tx(), set_Ty(), set_Tz(), or also buildFrom() - functions. - - The interaction() method allows to compute the interaction matrix - \f$ L\f$ associated to the translation visual feature, while the - error() method computes the error vector \f$(s - s^*)\f$ between the - current visual feature and the desired one. - - The code below shows how to create a eye-in hand visual servoing - task using a 3D translation feature \f$(t_x,t_y,t_z)\f$ that - correspond to the 3D translation between the desired camera frame - and the current camera frame. To control six degrees of freedom, at - least three other features must be considered like vpFeatureThetaU - visual features. First we create a current (\f$s\f$) and desired - (\f$s^*\f$) 3D translation feature, set the task to use the - interaction matrix associated to the current feature \f$L_s\f$ and - than compute the camera velocity \f$v=-\lambda \; L_s^+ \; - (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() loop - while \f$s^*\f$ is set to zero. - - \code -#include -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpHomogeneousMatrix cdMc; - // ... cdMc need here to be initialized from for example a pose estimation. - - // Creation of the current visual feature s - vpFeatureTranslation s(vpFeatureTranslation::cdMc); - s.buildFrom(cdMc); // Initialization of the current feature s=(tx,ty,tz) - - // Set eye-in-hand control law. 
- // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features s - task.setInteractionMatrixType(vpServo::CURRENT); - // Set the constant gain - double lambda = 0.8; - task.setLambda(lambda); - - // Add the 3D translation feature to the task - task.addFeature(s); // s* is here considered as zero - - // Control loop - for ( ; ; ) { - // ... cdMc need here to be initialized from for example a pose estimation. - - // Update the current 3D translation visual feature - s.buildFrom(cdMc); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } -} - \endcode - - If you want to deal only with the \f$(t_x,t_y)\f$ subset feature from the 3D - translation, you have just to modify the addFeature() call in - the previous example by the following line. In that case, the dimension - of \f$s\f$ is two. - - \code - // Add the (tx,ty) subset features from 3D translation to the task - task.addFeature(s, vpFeatureTranslation::selectTx() | vpFeatureTranslation::selectTy()); - \endcode - - If you want to build your own control law, this other example shows - how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D - translation visual feature, compute the corresponding error - vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. - - \code -#include -#include -#include - -int main() -{ - vpHomogeneousMatrix cdMc; - // ... cdMc need here to be initialized from for example a pose estimation. - - // Creation of the current feature s - vpFeatureTranslation s(vpFeatureTranslation::cdMc); - s.buildFrom(cdMc); // Initialization of the feature - - // Creation of the desired feature s*. 
By default this feature is - // initialized to zero - vpFeatureTranslation s_star(vpFeatureTranslation::cdMc); - - // Compute the interaction matrix for the translation feature - vpMatrix L = s.interaction(); - - // Compute the error vector (s-s*) for the translation feature - vpColVector e = s.error(s_star); // e = (s-s*) -} - \endcode - - The code below shows how to create an eye-in hand visual servoing - task using a 3D translation feature \f$(t_x,t_y,t_z)\f$ that - correspond to the 3D translation between the current camera frame - and the object frame. Like with the previous examples, to - control six degrees of freedom, at least three other features must be - considered like vpFeatureThetaU visual features. The way to initialize - the visual features is quite the same as before. The difference is that - the cMo method must be precised and the desired feature is note - necessary equal to zero. - - \code -#include -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - vpHomogeneousMatrix cdMo; - // ... cdMo need here to be initialized from for example a pose estimation. - - // Creation of the desired visual feature s* - vpFeatureTranslation s_star(vpFeatureTranslation::cMo); - s_star.buildFrom(cdMo); // Initialization of the desired feature s*=(tx*,ty*,tz*) - - vpHomogeneousMatrix cMo; - // ... cMo need here to be computed. - - // Creation of the current visual feature s - vpFeatureTranslation s(vpFeatureTranslation::cMo); - s.buildFrom(cMo); // Initialization of the current feature s=(tx,ty,tz) - - // Set eye-in-hand control law. 
- // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features s - task.setInteractionMatrixType(vpServo::CURRENT); - // Set the constant gain - double lambda = 0.8; - task.setLambda(lambda); - - // Add the 3D translation feature to the task - task.addFeature(s, s_star); // s* is here considered as zero - - // Control loop - for ( ; ; ) { - // ... cMo need here to be computed from for example a pose estimation. - - // Update the current 3D translation visual feature - s.buildFrom(cMo); - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } -} - \endcode - -*/ + * \class vpFeatureTranslation + * \ingroup group_visual_features + * + * \brief Class that defines the translation visual feature + * \f$s=(t_x,t_y,t_z)\f$. + * + * It is convenient to consider two coordinate frames noted here \f$ + * {\cal{F}}_1 \f$ and \f$ {\cal{F}}_{2} \f$. + * + * Let \f$^{{\cal{F}}_2}M_{{\cal{F}}_1} \f$ be the homogeneous matrix that + * gives the orientation and the translation of the frame \f$ {\cal{F}}_1 \f$ + * with respect to the frame \f$ {\cal{F}}_2 \f$. + * + * \f[ + * ^{{\cal{F}}_2}M_{{\cal{F}}_1} = \left(\begin{array}{cc} + * ^{{\cal{F}}_2}R_{{\cal{F}}_1} & ^{{\cal{F}}_2}t_{{\cal{F}}_1} \\ + * {\bf 0}_{1\times 3} & 1 + * \end{array} + * \right) + * \f] + * + * with \f$^{{\cal{F}}_2}R_{{\cal{F}}_1} \f$ the rotation matrix that gives the + * orientation of the frame \f$ {\cal{F}}_1 \f$ relative to the frame \f$ + * {\cal{F}}_2 \f$ and \f$^{{\cal{F}}_2}t_{{\cal{F}}_1} \f$ the translation + * vector that gives the position of the frame \f$ {\cal{F}}_1 \f$ relative to + * the frame \f$ {\cal{F}}_2 \f$. To know more about homogeneous matrices see + * vpHomogeneousMatrix documentation. 
+ * + * This class can be used to manipulate three kind of visual features: + * + * - This class can be used to manipulate the translation visual feature + * \f$s= ^{c^*}t_c\f$ which gives the position of + * the current camera frame relative to the desired camera frame. It is + * composed by the three components \f$(t_x,t_y,t_z)\f$. The desired visual + * feature \f$ s^* \f$ is equal to zero. The corresponding error is than equal to + * \f$ e=(s-s^*) = ^{c^*}t_c \f$. In this case, the interaction matrix related to + * \f$ s \f$ is given by \f[ L = [ ^{c^*}R_c \;\; 0_3] \f] + * + * - This class can also be used to manipulate the translation visual feature + * \f$s= ^{c}t_{c^*}\f$ which gives the position of + * the desired camera frame relative to the current camera frame. It is + * composed by the three components \f$(t_x,t_y,t_z)\f$. The desired visual + * feature \f$ s^* \f$ is equal to zero. The corresponding error is than equal to + * \f$ e=(s-s^*) = ^{c}t_{c^*} \f$. In this case, the interaction matrix related + * to \f$ s \f$ is given by \f[ L = [ -I_3 \;\; [^{c}t_{c^*}]_\times] \f] + * + * - Actually, this class can also be used to manipulate the + * translation visual feature \f$s= ^{c}t_o\f$ which gives the position + * of the object frame relative to the current camera frame. It is + * composed by the three components \f$(t_x,t_y,t_z)\f$ too. The + * desired visual feature \f$ s^* \f$ is the translation visual feature + * \f$s^*= ^{c^*}t_o\f$ which gives the position of the object frame + * relative to the desired camera frame. The corresponding error is + * than equal to \f$ e=(s-s^*) = ^{c}t_o - ^{c^*}t_o \f$. In this case, + * the interaction matrix related to \f$ s \f$ is given by \f[ L = [ + * -I_3 \;\; [^{c}t_o]_\times] \f] + * + * To initialize the feature \f$(t_x, t_y, t_z)\f$ you may use member + * functions like set_Tx(), set_Ty(), set_Tz(), or also buildFrom() + * functions. 
+ * + * The interaction() method allows to compute the interaction matrix + * \f$ L\f$ associated to the translation visual feature, while the + * error() method computes the error vector \f$(s - s^*)\f$ between the + * current visual feature and the desired one. + * + * The code below shows how to create a eye-in hand visual servoing + * task using a 3D translation feature \f$(t_x,t_y,t_z)\f$ that + * correspond to the 3D translation between the desired camera frame + * and the current camera frame. To control six degrees of freedom, at + * least three other features must be considered like vpFeatureThetaU + * visual features. First we create a current (\f$s\f$) and desired + * (\f$s^*\f$) 3D translation feature, set the task to use the + * interaction matrix associated to the current feature \f$L_s\f$ and + * than compute the camera velocity \f$v=-\lambda \; L_s^+ \; + * (s-s^*)\f$. The current feature \f$s\f$ is updated in the while() loop + * while \f$s^*\f$ is set to zero. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpHomogeneousMatrix cdMc; + * // ... cdMc need here to be initialized from for example a pose estimation. + * + * // Creation of the current visual feature s + * vpFeatureTranslation s(vpFeatureTranslation::cdMc); + * s.buildFrom(cdMc); // Initialization of the current feature s=(tx,ty,tz) + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features s + * task.setInteractionMatrixType(vpServo::CURRENT); + * // Set the constant gain + * double lambda = 0.8; + * task.setLambda(lambda); + * + * // Add the 3D translation feature to the task + * task.addFeature(s); // s* is here considered as zero + * + * // Control loop + * for ( ; ; ) { + * // ... 
cdMc need here to be initialized from for example a pose estimation. + * + * // Update the current 3D translation visual feature + * s.buildFrom(cdMc); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + * + * If you want to deal only with the \f$(t_x,t_y)\f$ subset feature from the 3D + * translation, you have just to modify the addFeature() call in + * the previous example by the following line. In that case, the dimension + * of \f$s\f$ is two. + * + * \code + * // Add the (tx,ty) subset features from 3D translation to the task + * task.addFeature(s, vpFeatureTranslation::selectTx() | vpFeatureTranslation::selectTy()); + * \endcode + * + * If you want to build your own control law, this other example shows + * how to create a current (\f$s\f$) and desired (\f$s^*\f$) 3D + * translation visual feature, compute the corresponding error + * vector \f$(s-s^*)\f$ and finally build the interaction matrix \f$L_s\f$. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpHomogeneousMatrix cdMc; + * // ... cdMc need here to be initialized from for example a pose estimation. + * + * // Creation of the current feature s + * vpFeatureTranslation s(vpFeatureTranslation::cdMc); + * s.buildFrom(cdMc); // Initialization of the feature + * + * // Creation of the desired feature s*. By default this feature is + * // initialized to zero + * vpFeatureTranslation s_star(vpFeatureTranslation::cdMc); + * + * // Compute the interaction matrix for the translation feature + * vpMatrix L = s.interaction(); + * + * // Compute the error vector (s-s*) for the translation feature + * vpColVector e = s.error(s_star); // e = (s-s*) + * } + * \endcode + * + * The code below shows how to create an eye-in hand visual servoing + * task using a 3D translation feature \f$(t_x,t_y,t_z)\f$ that + * correspond to the 3D translation between the current camera frame + * and the object frame. 
Like with the previous examples, to + * control six degrees of freedom, at least three other features must be + * considered like vpFeatureThetaU visual features. The way to initialize + * the visual features is quite the same as before. The difference is that + * the cMo method must be precised and the desired feature is note + * necessary equal to zero. + * + * \code + * #include + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * vpHomogeneousMatrix cdMo; + * // ... cdMo need here to be initialized from for example a pose estimation. + * + * // Creation of the desired visual feature s* + * vpFeatureTranslation s_star(vpFeatureTranslation::cMo); + * s_star.buildFrom(cdMo); // Initialization of the desired feature s*=(tx*,ty*,tz*) + * + * vpHomogeneousMatrix cMo; + * // ... cMo need here to be computed. + * + * // Creation of the current visual feature s + * vpFeatureTranslation s(vpFeatureTranslation::cMo); + * s.buildFrom(cMo); // Initialization of the current feature s=(tx,ty,tz) + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features s + * task.setInteractionMatrixType(vpServo::CURRENT); + * // Set the constant gain + * double lambda = 0.8; + * task.setLambda(lambda); + * + * // Add the 3D translation feature to the task + * task.addFeature(s, s_star); // s* is here considered as zero + * + * // Control loop + * for ( ; ; ) { + * // ... cMo need here to be computed from for example a pose estimation. + * + * // Update the current 3D translation visual feature + * s.buildFrom(cMo); + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * } + * \endcode + */ class VISP_EXPORT vpFeatureTranslation : public vpBasicFeature { public: /*! 
- \enum vpFeatureTranslationRepresentationType - Kind of implemented 3D translation feature. + * \enum vpFeatureTranslationRepresentationType + * Kind of implemented 3D translation feature. */ - typedef enum { - /*! Selector used to manipulate the visual feature \f$s= - ^{c^*}t_c\f$ which gives the position of the current camera frame - relative to the desired camera frame.*/ + typedef enum + { +/*! Selector used to manipulate the visual feature \f$s= + ^{c^*}t_c\f$ which gives the position of the current camera frame + relative to the desired camera frame.*/ cdMc, /*! Selector used to manipulate the visual feature \f$s= ^{c}t_{c^*}\f$ which gives the position of the desired camera frame @@ -298,24 +294,22 @@ class VISP_EXPORT vpFeatureTranslation : public vpBasicFeature // constructor : build from an homogeneous matrix // cdMc is the displacement that the camera has to realize vpFeatureTranslation(vpHomogeneousMatrix &f2Mf1, vpFeatureTranslationRepresentationType r); - //! Destructor. Does nothing. - virtual ~vpFeatureTranslation() {} // build from an homogeneous matrix // cdMc is the displacement that the camera has to realize void buildFrom(const vpHomogeneousMatrix &f2Mf1); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; //! 
Feature duplication - vpFeatureTranslation *duplicate() const; + vpFeatureTranslation *duplicate() const override; // compute the error between two visual features from a subset // a the possible features - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; vpFeatureTranslationRepresentationType getFeatureTranslationType() const; @@ -324,12 +318,12 @@ class VISP_EXPORT vpFeatureTranslation : public vpBasicFeature double get_Tz() const; // basic construction - void init(); + void init() override; // compute the interaction matrix from a subset a the possible features - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; // print the name of the feature - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void set_Tx(double t_x); void set_Ty(double t_y); diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureVanishingPoint.h b/modules/visual_features/include/visp3/visual_features/vpFeatureVanishingPoint.h index 491c7a53d1..e4da3cb7cb 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureVanishingPoint.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureVanishingPoint.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,19 +29,15 @@ * * Description: * 2D vanishing point visual feature (Z coordinate in 3D space is infinity) - * - * Authors: - * Odile Bourquardez - * -*****************************************************************************/ + */ #ifndef vpFeatureVanishingPoint_H #define vpFeatureVanishingPoint_H /*! 
- \file vpFeatureVanishingPoint.h \brief Class that defines 2D vanishing - point visual feature (Z coordinate in 3D space is infinity) -*/ + * \file vpFeatureVanishingPoint.h \brief Class that defines 2D vanishing + * point visual feature (Z coordinate in 3D space is infinity) + */ #include #include @@ -52,26 +47,26 @@ #include /*! - \class vpFeatureVanishingPoint - \ingroup group_visual_features - - Class that defines 2D vanishing point visual features. Various features can be considered: - - - Either the cartesian coordinates \f$ (x, y) \f$ of the vanishing point obtained from the intersection of two lines; - in that case \f$ {\bf s} = (x, y) \f$ and the corresponding interaction matrices are: - \f[ L_x = \left[ \begin{array}{cccccc} 0 & 0 & 0 & x y & -(1 + x^2) & y \end{array} \right] \f] - \f[ L_y = \left[ \begin{array}{cccccc} 0 & 0 & 0 & 1 + y * y & -xy & -x \end{array} \right] \f] - - - Rather features function of the polar coordinates of the vanishing point obtained themselves from the polar - coordinates of the two lines \f$(\rho_1, \theta_1)\f$ and \f$(\rho_2, \theta_2)\f$; in that case \f$ {\bf s} = - (\arctan(1/\rho), 1/\rho, \alpha) \f$ with: \f[ 1/\rho = \frac{\sin(\theta_1 - \theta_2)}{\sqrt{\rho_1^2 + \rho_2^2 - - 2 \rho_1 \rho_2 cos(\theta_1 - \theta_2)}} \f] \f[ \alpha = \frac{\rho_1 \cos \theta_2 - \rho_2 cos - \theta_1}{\sqrt{\rho_1^2 + \rho_2^2 - 2 \rho_1 \rho_2 cos(\theta_1 - \theta_2)}} \f] The corresponding interaction - matrices are: \f[ L_{\arctan(\frac{1}{\rho})} = \left[ \begin{array}{cccccc} 0 & 0 & 0 & - \sin \alpha & \cos \alpha & - 0 \end{array} \right] \f] \f[ L_{\frac{1}{\rho}} = \left[ \begin{array}{cccccc} 0 & 0 & 0 & -(1 + \frac{1}{\rho^2}) - \sin \alpha & (1 + \frac{1}{\rho^2}) \cos \alpha & 0 \end{array} \right] \f] \f[ L_{\alpha} = \left[ - \begin{array}{cccccc} 0 & 0 & 0 & \frac{\cos \alpha}{\rho} & \frac{\sin \alpha}{\rho} & -1 \end{array} \right] \f] -*/ + * \class vpFeatureVanishingPoint + * \ingroup group_visual_features 
+ * + * Class that defines 2D vanishing point visual features. Various features can be considered: + + * - Either the cartesian coordinates \f$ (x, y) \f$ of the vanishing point obtained from the intersection of two lines; + * in that case \f$ {\bf s} = (x, y) \f$ and the corresponding interaction matrices are: + * \f[ L_x = \left[ \begin{array}{cccccc} 0 & 0 & 0 & x y & -(1 + x^2) & y \end{array} \right] \f] + * \f[ L_y = \left[ \begin{array}{cccccc} 0 & 0 & 0 & 1 + y * y & -xy & -x \end{array} \right] \f] + * + * - Rather features function of the polar coordinates of the vanishing point obtained themselves from the polar + * coordinates of the two lines \f$(\rho_1, \theta_1)\f$ and \f$(\rho_2, \theta_2)\f$; in that case \f$ {\bf s} = + * (\arctan(1/\rho), 1/\rho, \alpha) \f$ with: \f[ 1/\rho = \frac{\sin(\theta_1 - \theta_2)}{\sqrt{\rho_1^2 + \rho_2^2 - + * 2 \rho_1 \rho_2 cos(\theta_1 - \theta_2)}} \f] \f[ \alpha = \frac{\rho_1 \cos \theta_2 - \rho_2 cos + * \theta_1}{\sqrt{\rho_1^2 + \rho_2^2 - 2 \rho_1 \rho_2 cos(\theta_1 - \theta_2)}} \f] The corresponding interaction + * matrices are: \f[ L_{\arctan(\frac{1}{\rho})} = \left[ \begin{array}{cccccc} 0 & 0 & 0 & - \sin \alpha & \cos \alpha & + * 0 \end{array} \right] \f] \f[ L_{\frac{1}{\rho}} = \left[ \begin{array}{cccccc} 0 & 0 & 0 & -(1 + \frac{1}{\rho^2}) + * \sin \alpha & (1 + \frac{1}{\rho^2}) \cos \alpha & 0 \end{array} \right] \f] \f[ L_{\alpha} = \left[ + * \begin{array}{cccccc} 0 & 0 & 0 & \frac{\cos \alpha}{\rho} & \frac{\sin \alpha}{\rho} & -1 \end{array} \right] \f] + */ class VISP_EXPORT vpFeatureVanishingPoint : public vpBasicFeature { public: @@ -83,19 +78,17 @@ class VISP_EXPORT vpFeatureVanishingPoint : public vpBasicFeature public: vpFeatureVanishingPoint(); - //! Destructor. 
- virtual ~vpFeatureVanishingPoint() {} void buildFrom(double x, double y); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; - vpFeatureVanishingPoint *duplicate() const; + vpFeatureVanishingPoint *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = (selectX() | selectY())); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = (selectX() | selectY())) override; double get_x() const; double get_y() const; @@ -103,10 +96,10 @@ class VISP_EXPORT vpFeatureVanishingPoint : public vpBasicFeature double getOneOverRho() const; double getAlpha() const; - void init(); - vpMatrix interaction(unsigned int select = (selectX() | selectY())); + void init() override; + vpMatrix interaction(unsigned int select = (selectX() | selectY())) override; - void print(unsigned int select = (selectX() | selectY())) const; + void print(unsigned int select = (selectX() | selectY())) const override; void set_x(double x); void set_y(double y); diff --git a/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h b/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h index 9432823071..0af2b86297 100644 --- a/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h +++ b/modules/visual_features/include/visp3/visual_features/vpGenericFeature.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,15 @@ * * Description: * Generic feature (used to create new feature not implemented in ViSP). 
- * -*****************************************************************************/ + */ #ifndef vpGenericFeature_hh #define vpGenericFeature_hh /*! - \file vpGenericFeature.h - \brief class that defines what is a generic feature (used to create new - feature not implemented in ViSP2 + * \file vpGenericFeature.h + * \brief class that defines what is a generic feature (used to create new + * feature not implemented in ViSP2 */ #include @@ -49,132 +47,130 @@ #include /*! - \class vpGenericFeature - \ingroup group_core_features - - \brief Class that enables to define a feature or a set of features which are -not implemented in ViSP as a specific class. It is indeed possible to create -its own features, to use the corresponding interaction matrix, and to compute -an error between the current and the desired feature. Moreover the created -features can be mixed with features already implemented. - - The following example shows how to use the vpGenericFeature class to create -and use the feature \f$ log(Z) \f$ where Z corresponds to the depth of a point -whose 2D coordinates in the camera frame are \f$ x \f$ and \f$ y \f$. The -interaction matrix corresponding to this feature is \f[ L = -\left[\begin{array}{cccccc} 0 & 0 & -1/Z & -y & x & 0 \end{array}\right]\f]. - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - //First we have to define the desired feature log(Z*) corresponding to the desired point. - double xd = 0; //The x coordinate of the desired point. - double yd = 0; //The y coordinate of the desired point. - double Zd = 1; //The depth of the desired point. - vpGenericFeature logZd(1); //The dimension of the feature is 1. - logZd.set_s( log(Zd) ); - - //Then we have to define the current feature log(Z) corresponding to the current point. - double x = 1; //The x coordinate of the current point. - double y = 1; //The y coordinate of the current point. - double Z = 2; //The depth of the current point. 
- vpGenericFeature logZ(1); //The dimension of the feature is 1. - logZ.set_s( log(Z) ); - - // Set eye-in-hand control law. - // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features sd - task.setInteractionMatrixType(vpServo::CURRENT); - - // Add the point feature to the task - task.addFeature(logZ, logZd); - - // Control loop - for ( ; ; ) { - // The new parameters x, y and Z must be computed here. - - // Update the current point visual feature - logZ.set_s( log(Z) ) ; - - // We have to compute the interaction matrix corresponding to the feature. - vpMatrix LlogZ(1,6) ; - LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ; - LlogZ[0][2] = -1/Z; - LlogZ[0][3] = -y; - LlogZ[0][4] = x; - logZ.setInteractionMatrix(LlogZ) ; - - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - -The second example shows how to create and use a feature whose specificity is -to have a desired feature fixed to zero. It is the case for the feature \f$ -log( \frac{Z}{Z^*}) \f$. - - \code -#include -#include - -int main() -{ - vpServo task; // Visual servoing task - - //First we have to define the desired feature log(Z*) corresponding to the desired point. - double xd = 0; //The x coordinate of the desired point. - double yd = 0; //The y coordinate of the desired point. - double Zd = 1; //The depth of the desired point. - - //Then we have to define the current feature log(Z) corresponding to the current point. - double x = 1; //The x coordinate of the current point. - double y = 1; //The y coordinate of the current point. - double Z = 2; //The depth of the current point. - vpGenericFeature logZ(1); //The dimension of the feature is 1. - logZ.set_s( log(Z/Zd) ); - - // Set eye-in-hand control law. 
- // The computed velocities will be expressed in the camera frame - task.setServo(vpServo::EYEINHAND_CAMERA); - // Interaction matrix is computed with the current visual features sd - task.setInteractionMatrixType(vpServo::CURRENT); - - // Add the point feature to the task - task.addFeature(logZ); - - // Control loop - for ( ; ; ) { - // The new parameters x, y and Z must be computed here. - - // Update the current point visual feature - logZ.set_s( log(Z/Zd) ) ; - - // We have to compute the interaction matrix corresponding to the feature. - vpMatrix LlogZ(1,6) ; - LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ; - LlogZ[0][2] = -1/Z; - LlogZ[0][3] = -y; - LlogZ[0][4] = x; - logZ.setInteractionMatrix(LlogZ) ; - - - // compute the control law - vpColVector v = task.computeControlLaw(); // camera velocity - } - return 0; -} - \endcode - -If the feature needs to be use with other features, the example -servoSimuPoint2DhalfCamVelocity2.cpp shows how to do it. + * \class vpGenericFeature + * \ingroup group_core_features + * + * \brief Class that enables to define a feature or a set of features which are + * not implemented in ViSP as a specific class. It is indeed possible to create + * its own features, to use the corresponding interaction matrix, and to compute + * an error between the current and the desired feature. Moreover the created + * features can be mixed with features already implemented. + * + * The following example shows how to use the vpGenericFeature class to create + * and use the feature \f$ log(Z) \f$ where Z corresponds to the depth of a point + * whose 2D coordinates in the camera frame are \f$ x \f$ and \f$ y \f$. The + * interaction matrix corresponding to this feature is \f[ L = + * \left[\begin{array}{cccccc} 0 & 0 & -1/Z & -y & x & 0 \end{array}\right]\f]. 
+ * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * //First we have to define the desired feature log(Z*) corresponding to the desired point. + * double xd = 0; //The x coordinate of the desired point. + * double yd = 0; //The y coordinate of the desired point. + * double Zd = 1; //The depth of the desired point. + * vpGenericFeature logZd(1); //The dimension of the feature is 1. + * logZd.set_s( log(Zd) ); + * + * //Then we have to define the current feature log(Z) corresponding to the current point. + * double x = 1; //The x coordinate of the current point. + * double y = 1; //The y coordinate of the current point. + * double Z = 2; //The depth of the current point. + * vpGenericFeature logZ(1); //The dimension of the feature is 1. + * logZ.set_s( log(Z) ); + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features sd + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * // Add the point feature to the task + * task.addFeature(logZ, logZd); + * + * // Control loop + * for ( ; ; ) { + * // The new parameters x, y and Z must be computed here. + * + * // Update the current point visual feature + * logZ.set_s( log(Z) ) ; + * + * // We have to compute the interaction matrix corresponding to the feature. + * vpMatrix LlogZ(1,6) ; + * LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ; + * LlogZ[0][2] = -1/Z; + * LlogZ[0][3] = -y; + * LlogZ[0][4] = x; + * logZ.setInteractionMatrix(LlogZ) ; + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * The second example shows how to create and use a feature whose specificity is + * to have a desired feature fixed to zero. It is the case for the feature \f$ + * log( \frac{Z}{Z^*}) \f$. 
+ * + * \code + * #include + * #include + * + * int main() + * { + * vpServo task; // Visual servoing task + * + * //First we have to define the desired feature log(Z*) corresponding to the desired point. + * double xd = 0; //The x coordinate of the desired point. + * double yd = 0; //The y coordinate of the desired point. + * double Zd = 1; //The depth of the desired point. + * + * //Then we have to define the current feature log(Z) corresponding to the current point. + * double x = 1; //The x coordinate of the current point. + * double y = 1; //The y coordinate of the current point. + * double Z = 2; //The depth of the current point. + * vpGenericFeature logZ(1); //The dimension of the feature is 1. + * logZ.set_s( log(Z/Zd) ); + * + * // Set eye-in-hand control law. + * // The computed velocities will be expressed in the camera frame + * task.setServo(vpServo::EYEINHAND_CAMERA); + * // Interaction matrix is computed with the current visual features sd + * task.setInteractionMatrixType(vpServo::CURRENT); + * + * // Add the point feature to the task + * task.addFeature(logZ); + * + * // Control loop + * for ( ; ; ) { + * // The new parameters x, y and Z must be computed here. + * + * // Update the current point visual feature + * logZ.set_s( log(Z/Zd) ) ; + * + * // We have to compute the interaction matrix corresponding to the feature. + * vpMatrix LlogZ(1,6) ; + * LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ; + * LlogZ[0][2] = -1/Z; + * LlogZ[0][3] = -y; + * LlogZ[0][4] = x; + * logZ.setInteractionMatrix(LlogZ) ; + * + * // compute the control law + * vpColVector v = task.computeControlLaw(); // camera velocity + * } + * return 0; + * } + * \endcode + * + * If the feature needs to be use with other features, the example + * servoSimuPoint2DhalfCamVelocity2.cpp shows how to do it. 
*/ class VISP_EXPORT vpGenericFeature : public vpBasicFeature { @@ -183,16 +179,15 @@ class VISP_EXPORT vpGenericFeature : public vpBasicFeature public: explicit vpGenericFeature(unsigned int dim); - virtual ~vpGenericFeature(); void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; void display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color = vpColor::green, - unsigned int thickness = 1) const; + unsigned int thickness = 1) const override; - vpGenericFeature *duplicate() const; + vpGenericFeature *duplicate() const override; - vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL); + vpColVector error(const vpBasicFeature &s_star, unsigned int select = FEATURE_ALL) override; vpColVector error(unsigned int select = FEATURE_ALL); @@ -202,11 +197,11 @@ class VISP_EXPORT vpGenericFeature : public vpBasicFeature void get_s(double &s0, double &s1) const; void get_s(double &s0, double &s1, double &s2) const; - void init(); + void init() override; - vpMatrix interaction(unsigned int select = FEATURE_ALL); + vpMatrix interaction(unsigned int select = FEATURE_ALL) override; - void print(unsigned int select = FEATURE_ALL) const; + void print(unsigned int select = FEATURE_ALL) const override; void setInteractionMatrix(const vpMatrix &L); void setError(const vpColVector &error_vector); void set_s(const vpColVector &s); diff --git a/modules/visual_features/src/feature-builder/vpFeatureBuilderSegment.cpp b/modules/visual_features/src/feature-builder/vpFeatureBuilderSegment.cpp index c59c667ecc..9f9d8f2e1c 100644 --- a/modules/visual_features/src/feature-builder/vpFeatureBuilderSegment.cpp +++ b/modules/visual_features/src/feature-builder/vpFeatureBuilderSegment.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing 
Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,16 +29,12 @@ * * Description: * Segment creation out of dots. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ /*! - \file vpFeatureBuilderSegment.cpp - \brief Segment creation out of dots. -*/ + * \file vpFeatureBuilderSegment.cpp + * \brief Segment creation out of dots. + */ #include #include @@ -47,15 +42,13 @@ #ifdef VISP_HAVE_MODULE_BLOB /*! - Initialize a segment feature out of vpDots and camera parameters. - - \param s : Visual feature to initialize. - \param cam : The parameters of the camera used to acquire the image - containing the point. \param d1 : The dot corresponding to the first point - of the segment. \param d2 : The dot corresponding to the second point of the - segment. - -*/ + * Initialize a segment feature out of vpDots and camera parameters. + * + * \param s : Visual feature to initialize. + * \param cam : The parameters of the camera used to acquire the image containing the point. + * \param d1 : The dot corresponding to the first point of the segment. + * \param d2 : The dot corresponding to the second point of the segment. + */ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam, const vpDot &d1, const vpDot &d2) { double x1 = 0, y1 = 0, x2 = 0, y2 = 0; @@ -71,7 +64,8 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam s.setXc(xc / l); s.setYc(yc / l); s.setL(1 / l); - } else { + } + else { s.setXc(xc); s.setYc(yc); s.setL(l); @@ -81,15 +75,13 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam } /*! - Initialize a segment feature out of vpDots and camera parameters. - - \param s : Visual feature to initialize. - \param cam : The parameters of the camera used to acquire the image - containing the point. \param d1 : The dot corresponding to the first point - of the segment. 
\param d2 : The dot corresponding to the second point of the - segment. - -*/ + * Initialize a segment feature out of vpDots and camera parameters. + * + * \param s : Visual feature to initialize. + * \param cam : The parameters of the camera used to acquire the image containing the point. + * \param d1 : The dot corresponding to the first point of the segment. + * \param d2 : The dot corresponding to the second point of the segment. + */ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam, const vpDot2 &d1, const vpDot2 &d2) { double x1 = 0, y1 = 0, x2 = 0, y2 = 0; @@ -105,7 +97,8 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam s.setXc(xc / l); s.setYc(yc / l); s.setL(1 / l); - } else { + } + else { s.setXc(xc); s.setYc(yc); s.setL(l); @@ -116,15 +109,13 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam #endif //#ifdef VISP_HAVE_MODULE_BLOB /*! - Initialize a segment feature out of image points and camera parameters. - - \param s : Visual feature to initialize. - \param cam : The parameters of the camera used to acquire the image - containing the point. \param ip1 : The image point corresponding to the - first point of the segment. \param ip2 : The image point corresponding to - the second point of the segment. - -*/ + * Initialize a segment feature out of image points and camera parameters. + * + * \param s : Visual feature to initialize. + * \param cam : The parameters of the camera used to acquire the image containing the point. + * \param ip1 : The image point corresponding to the first point of the segment. + * \param ip2 : The image point corresponding to the second point of the segment. 
+ */ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam, const vpImagePoint &ip1, const vpImagePoint &ip2) { @@ -141,7 +132,8 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam s.setXc(xc / l); s.setYc(yc / l); s.setL(1 / l); - } else { + } + else { s.setXc(xc); s.setYc(yc); s.setL(l); @@ -151,15 +143,13 @@ void vpFeatureBuilder::create(vpFeatureSegment &s, const vpCameraParameters &cam } /*! - - Build a segment visual feature from two points. - - \param s : Visual feature to initialize. - \param P1, P2 : Two points defining the segment. These points must contain - the 3D coordinates in the camera frame (cP) and the projected coordinates in - the image plane (p). - -*/ + * Build a segment visual feature from two points. + * + * \param s : Visual feature to initialize. + * \param P1, P2 : Two points defining the segment. These points must contain + * the 3D coordinates in the camera frame (cP) and the projected coordinates in + * the image plane (p). + */ void vpFeatureBuilder::create(vpFeatureSegment &s, vpPoint &P1, vpPoint &P2) { double x1 = P1.get_x(); diff --git a/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp b/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp index 6d64283a03..5f7e9515af 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMoment.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Base for all moment features - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -51,8 +46,8 @@ class vpBasicFeature; /*! - Initialize common parameters for moment features. 
-*/ + * Initialize common parameters for moment features. + */ void vpFeatureMoment::init() { // feature dimension @@ -80,8 +75,8 @@ void vpFeatureMoment::init() } /*! - Feature's dimension according to selection. -*/ + * Feature's dimension according to selection. + */ int vpFeatureMoment::getDimension(unsigned int select) const { int dim = 0; @@ -94,8 +89,8 @@ int vpFeatureMoment::getDimension(unsigned int select) const } /*! - Outputs the content of the feature: it's corresponding selected moments. -*/ + * Outputs the content of the feature: it's corresponding selected moments. + */ void vpFeatureMoment::print(unsigned int select) const { for (unsigned int i = 0; i < dim_s; ++i) { @@ -122,9 +117,8 @@ void vpFeatureMoment::display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color, unsigned int thickness) const { @@ -135,21 +129,21 @@ void vpFeatureMoment::display(const vpCameraParameters &cam, const vpImageA = A_; @@ -175,27 +169,28 @@ void vpFeatureMoment::update(double A_, double B_, double C_) flags = new bool[nbParameters]; for (unsigned int i = 0; i < nbParameters; i++) flags[i] = false; - } else + } + else dim_s = 0; compute_interaction(); } /*! - Retrieves the interaction matrix. No computation is done. - - \param select : Feature selector. - - \return The corresponding interaction matrix. - - There is no rule about the format of the feature selector. It may be - different for different features. For example, for - vpFeatureMomentBasic or vpFeatureMomentCentered features, select may - refer to the \f$ (i,j) \f$ couple in the \f$ j \times order + i \f$ - format, but for vpFeatureMomentCInvariant the selector allows to - select couples \f$ (i,j,k,l...) \f$ in the following format: 1 << i - + 1 << j + 1 << k + 1 << l. -*/ + * Retrieves the interaction matrix. No computation is done. + * + * \param select : Feature selector. + * + * \return The corresponding interaction matrix. 
+ * + * There is no rule about the format of the feature selector. It may be + * different for different features. For example, for + * vpFeatureMomentBasic or vpFeatureMomentCentered features, select may + * refer to the \f$ (i,j) \f$ couple in the \f$ j \times order + i \f$ + * format, but for vpFeatureMomentCInvariant the selector allows to + * select couples \f$ (i,j,k,l...) \f$ in the following format: 1 << i + * + 1 << j + 1 << k + 1 << l. + */ vpMatrix vpFeatureMoment::interaction(unsigned int select) { vpMatrix L(0, 0); @@ -209,14 +204,15 @@ vpMatrix vpFeatureMoment::interaction(unsigned int select) return L; } -/*! Duplicates the feature into a vpGenericFeature harbouring the - same properties. The resulting feature is of vpMomentGenericFeature - type. While it still can compute interaction matrices and has acces - to it's moment primitive, it has lost all precise information about - its precise type and therefore cannot be used in a feature database. - - \return The corresponding feature. -*/ +/*! + * Duplicates the feature into a vpGenericFeature harbouring the + * same properties. The resulting feature is of vpMomentGenericFeature + * type. While it still can compute interaction matrices and has access + * to it's moment primitive, it has lost all precise information about + * its precise type and therefore cannot be used in a feature database. + * + * \return The corresponding feature. + */ vpBasicFeature *vpFeatureMoment::duplicate() const { vpFeatureMoment *feat = new vpMomentGenericFeature(moments, A, B, C, featureMomentsDataBase, moment); @@ -236,10 +232,11 @@ vpBasicFeature *vpFeatureMoment::duplicate() const } /*! - Links the feature to the feature's database. NB: The feature's database is - different from the moment's database. \param featureMoments : database in - which the moment features are stored. - + * Links the feature to the feature's database. + * + * \note The feature's database is different from the moment's database. 
+ * \param featureMoments : database in + * which the moment features are stored. */ void vpFeatureMoment::linkTo(vpFeatureMomentDatabase &featureMoments) { @@ -253,35 +250,33 @@ void vpFeatureMoment::linkTo(vpFeatureMomentDatabase &featureMoments) featureMoments.add(*this, _name); } -void vpFeatureMoment::compute_interaction() {} - -vpFeatureMoment::~vpFeatureMoment() {} +void vpFeatureMoment::compute_interaction() { } VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpFeatureMoment &featM) { /* - A const_cast is forced here since interaction() defined in vpBasicFeature() - is not const But introducing const in vpBasicFeature() can break a lot of - client code - */ - vpMatrix Lcomplete((unsigned int)featM.getDimension(), - 6); // 6 corresponds to 6velocities in standard interaction matrix + * - A static_cast is forced here since interaction() defined in vpBasicFeature() + * is not const. But introducing const in vpBasicFeature() can break a lot of + * client code. + * - 6 corresponds to 6 velocities in standard interaction matrix + */ + vpMatrix Lcomplete(static_cast(featM.getDimension()), 6); Lcomplete = const_cast(featM).interaction(vpBasicFeature::FEATURE_ALL); Lcomplete.matlabPrint(os); return os; } /*! -Interface function to display the moments and other interaction matrices -on which a particular vpFeatureMoment is dependent upon -Not made pure to maintain compatibility -Recommended : Types inheriting from vpFeatureMoment should implement this -function -*/ + * Interface function to display the moments and other interaction matrices + * on which a particular vpFeatureMoment is dependent upon + * Not made pure to maintain compatibility + * Recommended : Types inheriting from vpFeatureMoment should implement this + * function. + */ void vpFeatureMoment::printDependencies(std::ostream &os) const { os << " WARNING : Falling back to base class version of " - "printDependencies() in vpFeatureMoment. 
To prevent that, this has " - "to be implemented in the derived classes!" - << std::endl; + "printDependencies() in vpFeatureMoment. To prevent that, this has " + "to be implemented in the derived classes!" + << std::endl; } diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentAlpha.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentAlpha.cpp index 0b4e278a93..22a418b852 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentAlpha.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentAlpha.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for alpha moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -49,21 +44,21 @@ #ifdef VISP_MOMENTS_COMBINE_MATRICES /*! - Computes interaction matrix for alpha moment. Called internally. - The moment primitives must be computed before calling this. - This feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered -*/ + * Computes interaction matrix for alpha moment. Called internally. + * The moment primitives must be computed before calling this. 
+ * This feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + */ void vpFeatureMomentAlpha::compute_interaction() { bool found_moment_centered; bool found_FeatureMoment_centered; const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); vpFeatureMomentCentered &featureMomentCentered = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); + featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); if (!found_moment_centered) throw vpException(vpException::notInitialized, "vpMomentCentered not found"); @@ -75,28 +70,28 @@ void vpFeatureMomentAlpha::compute_interaction() double dinv = 1 / (4 * u11 * u11 + u20_u02 * u20_u02); interaction_matrices[0].resize(1, 6); interaction_matrices[0] = - (u20_u02 * dinv) * featureMomentCentered.interaction(1, 1) + - (u11 * dinv) * (featureMomentCentered.interaction(0, 2) - featureMomentCentered.interaction(2, 0)); + (u20_u02 * dinv) * featureMomentCentered.interaction(1, 1) + + (u11 * dinv) * (featureMomentCentered.interaction(0, 2) - featureMomentCentered.interaction(2, 0)); } #else // #ifdef VISP_MOMENTS_COMBINE_MATRICES /*! - Computes interaction matrix for alpha moment. Called internally. - The moment primitives must be computed before calling this. - This feature depends on: - - vpMomentCentered - - vpMomentGravityCenter -*/ + * Computes interaction matrix for alpha moment. Called internally. + * The moment primitives must be computed before calling this. 
+ * This feature depends on: + * - vpMomentCentered + * - vpMomentGravityCenter + */ void vpFeatureMomentAlpha::compute_interaction() { bool found_moment_centered; bool found_moment_gravity; const vpMomentCentered &momentCentered = - static_cast(moments.get("vpMomentCentered", found_moment_centered)); + static_cast(moments.get("vpMomentCentered", found_moment_centered)); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); const vpMomentObject &momentObject = moment->getObject(); if (!found_moment_centered) @@ -128,10 +123,10 @@ void vpFeatureMomentAlpha::compute_interaction() Awx = (beta * (mu12 * (mu20 - mu02) + mu11 * (mu03 - mu21)) + Xg * (mu02 * (mu20 - mu02) - 2 * mu11_2) + Yg * mu11 * (mu20 + mu02)) / - d; + d; Awy = (beta * (mu21 * (mu02 - mu20) + mu11 * (mu30 - mu12)) + Xg * mu11 * (mu20 + mu02) + Yg * (mu20 * (mu02 - mu20) - 2 * mu11_2)) / - d; + d; Avz = B * Awx - A * Awy; interaction_matrices.resize(1); diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentAreaNormalized.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentAreaNormalized.cpp index d976f58084..61c845c788 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentAreaNormalized.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentAreaNormalized.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #ifdef VISP_MOMENTS_COMBINE_MATRICES @@ -50,14 +45,14 @@ #include /*! 
- Computes interaction matrix for the normalized surface moment. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentAreaNormalized - - vpFeatureMomentBasic -*/ + * Computes interaction matrix for the normalized surface moment. Called + * internally. The moment primitives must be computed before calling this. This + * feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentAreaNormalized + * - vpFeatureMomentBasic + */ void vpFeatureMomentAreaNormalized::compute_interaction() { bool found_moment_centered; @@ -66,15 +61,15 @@ void vpFeatureMomentAreaNormalized::compute_interaction() bool found_featuremoment_basic; vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); const vpMomentCentered &momentCentered = - static_cast(moments.get("vpMomentCentered", found_moment_centered)); + static_cast(moments.get("vpMomentCentered", found_moment_centered)); const vpMomentObject &momentObject = moment->getObject(); const vpMomentAreaNormalized &momentSurfaceNormalized = static_cast( moments.get("vpMomentAreaNormalized", found_moment_surface_normalized)); vpFeatureMomentCentered &featureMomentCentered = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); + featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); if (!found_FeatureMoment_centered) throw vpException(vpException::notInitialized, "vpFeatureMomentCentered not found"); @@ -92,13 +87,14 @@ void vpFeatureMomentAreaNormalized::compute_interaction() if (momentObject.getType() == vpMomentObject::DISCRETE) { a = momentCentered.get(2, 0) + momentCentered.get(0, 2); La = featureMomentCentered.interaction(2, 0) + 
featureMomentCentered.interaction(0, 2); - } else { + } + else { a = momentObject.get(0, 0); La = featureMomentBasic.interaction(0, 0); } normalized_multiplier = - (-momentSurfaceNormalized.getDesiredDepth() / (2 * a)) * sqrt(momentSurfaceNormalized.getDesiredArea() / a); + (-momentSurfaceNormalized.getDesiredDepth() / (2 * a)) * sqrt(momentSurfaceNormalized.getDesiredArea() / a); interaction_matrices[0] = normalized_multiplier * La; } @@ -115,13 +111,13 @@ void vpFeatureMomentAreaNormalized::compute_interaction() #include /*! - Computes interaction matrix for the normalized surface moment. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpMomentAreaNormalized - - vpMomentGravityCenter -*/ + * Computes interaction matrix for the normalized surface moment. Called + * internally. The moment primitives must be computed before calling this. This + * feature depends on: + * - vpMomentCentered + * - vpMomentAreaNormalized + * - vpMomentGravityCenter + */ void vpFeatureMomentAreaNormalized::compute_interaction() { bool found_moment_centered; @@ -129,9 +125,9 @@ void vpFeatureMomentAreaNormalized::compute_interaction() bool found_moment_gravity; const vpMomentCentered &momentCentered = - static_cast(moments.get("vpMomentCentered", found_moment_centered)); + static_cast(moments.get("vpMomentCentered", found_moment_centered)); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); const vpMomentObject &momentObject = moment->getObject(); const vpMomentAreaNormalized &momentSurfaceNormalized = static_cast( moments.get("vpMomentAreaNormalized", found_moment_surface_normalized)); @@ -178,7 +174,8 @@ void vpFeatureMomentAreaNormalized::compute_interaction() Anvz = -An * C + B * Anwx - A * Anwy; - } else { + } + else { Anvx = A * An / 2.; Anvy = B * An / 2.; 
Anvz = -An * C - (3. / 2.) * A * Xn - (3. / 2.) * B * Yn; diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentBasic.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentBasic.cpp index 29d264472e..ff1bd231e8 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentBasic.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentBasic.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -42,24 +37,22 @@ #include #include /*! - Default constructor. - \param data_base : Database of moment primitives. - \param A_ : First plane coefficient for a plane equation of the following - type Ax+By+C=1/Z. \param B_ : Second plane coefficient for a plane equation - of the following type Ax+By+C=1/Z. \param C_ : Third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z. \param featureMoments : - Database of features. -*/ + * Default constructor. + * \param data_base : Database of moment primitives. + * \param A_ : First plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param B_ : Second plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param C_ : Third plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param featureMoments : Database of features. + */ vpFeatureMomentBasic::vpFeatureMomentBasic(vpMomentDatabase &data_base, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments) : vpFeatureMoment(data_base, A_, B_, C_, featureMoments), order(0) -{ -} +{ } /*! - Computes interaction matrix for basic moment. 
Called internally. - The moment primitives must be computed before calling this. -*/ + * Computes interaction matrix for basic moment. Called internally. + * The moment primitives must be computed before calling this. + */ void vpFeatureMomentBasic::compute_interaction() { int delta; @@ -70,7 +63,8 @@ void vpFeatureMomentBasic::compute_interaction() i->resize(1, 6); if (m.getType() == vpMomentObject::DISCRETE) { delta = 0; - } else { + } + else { delta = 1; } @@ -85,7 +79,7 @@ void vpFeatureMomentBasic::compute_interaction() interaction_matrices[0][0][VX] = -delta * A * m.get(0, 0); interaction_matrices[0][0][VY] = -delta * B * m.get(0, 0); interaction_matrices[0][0][VZ] = - 3 * delta * (A * m.get(1, 0) + B * m.get(0, 1) + C * m.get(0, 0)) - delta * C * m.get(0, 0); + 3 * delta * (A * m.get(1, 0) + B * m.get(0, 1) + C * m.get(0, 0)) - delta * C * m.get(0, 0); interaction_matrices[0][0][WX] = 3 * delta * m.get(0, 1); interaction_matrices[0][0][WY] = -3 * delta * m.get(1, 0); @@ -99,9 +93,9 @@ void vpFeatureMomentBasic::compute_interaction() interaction_matrices[j_ * order][0][VX] = -delta * A * m.get(0, j_); interaction_matrices[j_ * order][0][VY] = - -j * (A * m.get(1, jm1_) + B * m.get(0, j_) + C * m.get(0, jm1_)) - delta * B * m.get(0, j_); + -j * (A * m.get(1, jm1_) + B * m.get(0, j_) + C * m.get(0, jm1_)) - delta * B * m.get(0, j_); interaction_matrices[j_ * order][0][VZ] = - (j + 3 * delta) * (A * m.get(1, j_) + B * m.get(0, jp1_) + C * m.get(0, j_)) - delta * C * m.get(0, j_); + (j + 3 * delta) * (A * m.get(1, j_) + B * m.get(0, jp1_) + C * m.get(0, j_)) - delta * C * m.get(0, j_); interaction_matrices[j_ * order][0][WX] = (j + 3 * delta) * m.get(0, jp1_) + j * m.get(0, jm1_); interaction_matrices[j_ * order][0][WY] = -(j + 3 * delta) * m.get(1, j_); @@ -115,10 +109,10 @@ void vpFeatureMomentBasic::compute_interaction() unsigned int ip1_ = i_ + 1; interaction_matrices[i_][0][VX] = - -i * (A * m.get(i_, 0) + B * m.get(im1_, 1) + C * m.get(im1_, 0)) - delta * A 
* m.get(i_, 0); + -i * (A * m.get(i_, 0) + B * m.get(im1_, 1) + C * m.get(im1_, 0)) - delta * A * m.get(i_, 0); interaction_matrices[i_][0][VY] = -delta * B * m.get(i_, 0); interaction_matrices[i_][0][VZ] = - (i + 3 * delta) * (A * m.get(ip1_, 0) + B * m.get(i_, 1) + C * m.get(i_, 0)) - delta * C * m.get(i_, 0); + (i + 3 * delta) * (A * m.get(ip1_, 0) + B * m.get(i_, 1) + C * m.get(i_, 0)) - delta * C * m.get(i_, 0); interaction_matrices[i_][0][WX] = (i + 3 * delta) * m.get(i_, 1); interaction_matrices[i_][0][WY] = -(i + 3 * delta) * m.get(ip1_, 0) - i * m.get(im1_, 0); @@ -136,12 +130,12 @@ void vpFeatureMomentBasic::compute_interaction() unsigned int ip1_ = i_ + 1; interaction_matrices[j_ * order + i_][0][VX] = - -i * (A * m.get(i_, j_) + B * m.get(im1_, jp1_) + C * m.get(im1_, j_)) - delta * A * m.get(i_, j_); + -i * (A * m.get(i_, j_) + B * m.get(im1_, jp1_) + C * m.get(im1_, j_)) - delta * A * m.get(i_, j_); interaction_matrices[j_ * order + i_][0][VY] = - -j * (A * m.get(ip1_, jm1_) + B * m.get(i_, j_) + C * m.get(i_, jm1_)) - delta * B * m.get(i_, j_); + -j * (A * m.get(ip1_, jm1_) + B * m.get(i_, j_) + C * m.get(i_, jm1_)) - delta * B * m.get(i_, j_); interaction_matrices[j_ * order + i_][0][VZ] = - (i + j + 3 * delta) * (A * m.get(ip1_, j_) + B * m.get(i_, jp1_) + C * m.get(i_, j_)) - - delta * C * m.get(i_, j_); + (i + j + 3 * delta) * (A * m.get(ip1_, j_) + B * m.get(i_, jp1_) + C * m.get(i_, j_)) - + delta * C * m.get(i_, j_); interaction_matrices[j_ * order + i_][0][WX] = (i + j + 3 * delta) * m.get(i_, jp1_) + j * m.get(i_, jm1_); interaction_matrices[j_ * order + i_][0][WY] = -(i + j + 3 * delta) * m.get(ip1_, j_) - i * m.get(im1_, j_); @@ -151,11 +145,11 @@ void vpFeatureMomentBasic::compute_interaction() } /*! -Interaction matrix corresponding to \f$ m_{ij} \f$ moment. -\param select_one : first index (i). -\param select_two : second index (j). -\return Interaction matrix \f$ L_{m_{ij}} \f$ corresponding to the moment. 
-*/ + * Interaction matrix corresponding to \f$ m_{ij} \f$ moment. + * \param select_one : first index (i). + * \param select_two : second index (j). + * \return Interaction matrix \f$ L_{m_{ij}} \f$ corresponding to the moment. + */ vpMatrix vpFeatureMomentBasic::interaction(unsigned int select_one, unsigned int select_two) const { if (select_one + select_two > moment->getObject().getOrder()) diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentCInvariant.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentCInvariant.cpp index 40bc326afb..97769c5d7a 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentCInvariant.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentCInvariant.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #ifdef VISP_MOMENTS_COMBINE_MATRICES #include @@ -49,14 +44,14 @@ #include /*! - Computes interaction matrix for space-scale-rotation invariants. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentCInvariant - - vpFeatureMomentBasic -*/ + * Computes interaction matrix for space-scale-rotation invariants. Called + * internally. The moment primitives must be computed before calling this. 
This + * feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentCInvariant + * - vpFeatureMomentBasic + */ void vpFeatureMomentCInvariant::compute_interaction() { std::vector LI(16); @@ -67,14 +62,14 @@ void vpFeatureMomentCInvariant::compute_interaction() const vpMomentObject &momentObject = moment->getObject(); const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); const vpMomentCInvariant &momentCInvariant = - (static_cast(moments.get("vpMomentCInvariant", found_moment_cinvariant))); + (static_cast(moments.get("vpMomentCInvariant", found_moment_cinvariant))); vpFeatureMomentCentered &featureMomentCentered = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); + featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); if (!found_featuremoment_basic) throw vpException(vpException::notInitialized, "vpFeatureMomentBasic not found"); @@ -91,244 +86,244 @@ void vpFeatureMomentCInvariant::compute_interaction() zeros[0][i] = 0; LI[1] = -featureMomentCentered.interaction(2, 0) * momentCentered.get(0, 2) - - momentCentered.get(2, 0) * featureMomentCentered.interaction(0, 2) + - 2 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); + momentCentered.get(2, 0) * featureMomentCentered.interaction(0, 2) + + 2 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); LI[2] = 2 * (momentCentered.get(2, 0) - momentCentered.get(0, 2)) * - (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) + - 8 * momentCentered.get(1, 1) * 
featureMomentCentered.interaction(1, 1); + (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) + + 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); LI[3] = 2 * (momentCentered.get(3, 0) - 3 * momentCentered.get(1, 2)) * - (featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2)) + - 2 * (3 * momentCentered.get(2, 1) - momentCentered.get(0, 3)) * - (3 * featureMomentCentered.interaction(2, 1) - featureMomentCentered.interaction(0, 3)); + (featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2)) + + 2 * (3 * momentCentered.get(2, 1) - momentCentered.get(0, 3)) * + (3 * featureMomentCentered.interaction(2, 1) - featureMomentCentered.interaction(0, 3)); LI[4] = 2 * (momentCentered.get(3, 0) + momentCentered.get(1, 2)) * - (featureMomentCentered.interaction(3, 0) + featureMomentCentered.interaction(1, 2)) + - 2 * (momentCentered.get(2, 1) + momentCentered.get(0, 3)) * - (featureMomentCentered.interaction(2, 1) + featureMomentCentered.interaction(0, 3)); + (featureMomentCentered.interaction(3, 0) + featureMomentCentered.interaction(1, 2)) + + 2 * (momentCentered.get(2, 1) + momentCentered.get(0, 3)) * + (featureMomentCentered.interaction(2, 1) + featureMomentCentered.interaction(0, 3)); LI[5] = -2 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(3, 0) - - 4 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) - - 12 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) + - 
6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) - - 12 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) + - 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(1, 2) - - 2 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + - 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(0, 3) - - 4 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(0, 3); + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(3, 0) - + 4 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(3, 0) + + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) - + 12 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) + + 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) - + 12 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) + + 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(1, 2) - + 2 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + + 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(0, 3) - + 4 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(0, 3); LI[6] = 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * 
featureMomentCentered.interaction(3, 0) + - 4 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) - - 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(3, 0) - - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(3, 0) + - 2 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(3, 0) - - 12 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) - - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) + - 12 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(2, 1) + - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) - - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(2, 1) + - 6 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) - - 6 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) - - 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) + - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) - - 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(1, 2) + - 12 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(1, 2) + - 4 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) - - 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * 
momentCentered.get(3, 0) * - featureMomentCentered.interaction(0, 3) + - 2 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(0, 3) + - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) - - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3); + 4 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) - + 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(3, 0) - + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(3, 0) + + 2 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(3, 0) - + 12 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) - + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) + + 12 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(2, 1) + + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) - + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(2, 1) + + 6 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) - + 6 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) - + 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) + + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) - + 12 * momentCentered.get(0, 3) * 
momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(1, 2) + + 12 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(1, 2) + + 4 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) - + 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(0, 3) + + 2 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(0, 3) + + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) - + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3); LI[7] = -3 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(3, 0) - - 2 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(3, 0) - - 3 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(3, 0) + - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(3, 0) + - 3 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(3, 0) + - pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(3, 0) + - 3 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(2, 1) - - 6 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) - - 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) - - 9 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * 
featureMomentCentered.interaction(2, 1) - - 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(2, 1) + - 3 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(2, 1) - - 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(1, 2) * featureMomentCentered.interaction(2, 1) + - 3 * momentCentered.get(2, 1) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) + - 12 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) + - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) - - 3 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(1, 2) - - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(1, 2) + - 9 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(1, 2) - - 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(1, 2) + - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(1, 2) - - pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(0, 3) - - 3 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) + - 3 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) + - 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) - - 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) - - 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(0, 3) + - 2 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(0, 3); + 6 
* momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(3, 0) - + 2 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(3, 0) - + 3 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(3, 0) + + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(3, 0) + + 3 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(3, 0) + + pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(3, 0) + + 3 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(2, 1) - + 6 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) - + 6 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) - + 9 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(2, 1) - + 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(2, 1) + + 3 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(2, 1) - + 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(1, 2) * featureMomentCentered.interaction(2, 1) + + 3 * momentCentered.get(2, 1) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) + + 12 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) + + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) - + 3 * pow(momentCentered.get(2, 1), 3) * featureMomentCentered.interaction(1, 2) - + 6 * momentCentered.get(0, 
3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(1, 2) + + 9 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(1, 2) - + 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(1, 2) + + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(1, 2), 2) * featureMomentCentered.interaction(1, 2) - + pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(0, 3) - + 3 * pow(momentCentered.get(2, 1), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) + + 3 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) + + 3 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(0, 3) - + 6 * momentCentered.get(1, 2) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) - + 6 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(0, 3) + + 2 * pow(momentCentered.get(1, 2), 3) * featureMomentCentered.interaction(0, 3); LI[8] = -2 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(1, 2) * - featureMomentCentered.interaction(2, 1) - - 6 * featureMomentCentered.interaction(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * - momentCentered.get(1, 2) - - 6 * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) * momentCentered.get(0, 3) * - momentCentered.get(1, 2) - - 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3) * - momentCentered.get(1, 2) - - 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(1, 2) - - 2 * momentCentered.get(3, 0) * momentCentered.get(1, 2) * 
momentCentered.get(0, 3) * - featureMomentCentered.interaction(0, 3) + - 6 * momentCentered.get(2, 1) * momentCentered.get(1, 2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(1, 2) - - pow((double)momentCentered.get(3, 0), (double)3) * featureMomentCentered.interaction(1, 2) + - 3 * featureMomentCentered.interaction(3, 0) * pow((double)momentCentered.get(1, 2), (double)3) + - 6 * pow((double)momentCentered.get(2, 1), (double)3) * featureMomentCentered.interaction(0, 3) - - featureMomentCentered.interaction(2, 1) * pow((double)momentCentered.get(0, 3), (double)3) + - 3 * featureMomentCentered.interaction(2, 1) * pow((double)momentCentered.get(1, 2), (double)2) * - momentCentered.get(0, 3) + - 18 * pow((double)momentCentered.get(2, 1), (double)2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(2, 1) - - pow((double)momentCentered.get(3, 0), (double)2) * featureMomentCentered.interaction(2, 1) * - momentCentered.get(0, 3) + - 9 * momentCentered.get(3, 0) * pow((double)momentCentered.get(1, 2), (double)2) * - featureMomentCentered.interaction(1, 2) - - 4 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(1, 2) * - featureMomentCentered.interaction(1, 2) + - 2 * pow((double)momentCentered.get(1, 2), (double)2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(0, 3) - - 4 * momentCentered.get(3, 0) * pow((double)momentCentered.get(1, 2), (double)2) * - featureMomentCentered.interaction(3, 0) + - 2 * momentCentered.get(1, 2) * pow((double)momentCentered.get(0, 3), (double)2) * - featureMomentCentered.interaction(1, 2) - - 4 * momentCentered.get(2, 1) * pow((double)momentCentered.get(0, 3), (double)2) * - featureMomentCentered.interaction(2, 1) + - 3 * momentCentered.get(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * - featureMomentCentered.interaction(1, 2) - - 3 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(1, 2) * - featureMomentCentered.interaction(3, 0) - - 
momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) * - pow((double)momentCentered.get(0, 3), (double)2) - - 4 * pow((double)momentCentered.get(2, 1), (double)2) * momentCentered.get(0, 3) * - featureMomentCentered.interaction(0, 3) - - 3 * momentCentered.get(2, 1) * pow((double)momentCentered.get(0, 3), (double)2) * - featureMomentCentered.interaction(0, 3) + - 2 * momentCentered.get(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * - featureMomentCentered.interaction(3, 0) + - 2 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(2, 1) + - 3 * featureMomentCentered.interaction(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * - momentCentered.get(1, 2) - - pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(0, 3) + - 3 * momentCentered.get(2, 1) * pow((double)momentCentered.get(1, 2), (double)2) * - featureMomentCentered.interaction(0, 3) - - featureMomentCentered.interaction(3, 0) * momentCentered.get(1, 2) * - pow((double)momentCentered.get(0, 3), (double)2); + featureMomentCentered.interaction(3, 0) + + 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(1, 2) * + featureMomentCentered.interaction(2, 1) - + 6 * featureMomentCentered.interaction(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * + momentCentered.get(1, 2) - + 6 * momentCentered.get(3, 0) * featureMomentCentered.interaction(2, 1) * momentCentered.get(0, 3) * + momentCentered.get(1, 2) - + 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3) * + momentCentered.get(1, 2) - + 6 * momentCentered.get(3, 0) * momentCentered.get(2, 1) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(1, 2) - + 2 * momentCentered.get(3, 0) * momentCentered.get(1, 2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(0, 3) + + 6 * momentCentered.get(2, 1) * 
momentCentered.get(1, 2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(1, 2) - + pow((double)momentCentered.get(3, 0), (double)3) * featureMomentCentered.interaction(1, 2) + + 3 * featureMomentCentered.interaction(3, 0) * pow((double)momentCentered.get(1, 2), (double)3) + + 6 * pow((double)momentCentered.get(2, 1), (double)3) * featureMomentCentered.interaction(0, 3) - + featureMomentCentered.interaction(2, 1) * pow((double)momentCentered.get(0, 3), (double)3) + + 3 * featureMomentCentered.interaction(2, 1) * pow((double)momentCentered.get(1, 2), (double)2) * + momentCentered.get(0, 3) + + 18 * pow((double)momentCentered.get(2, 1), (double)2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(2, 1) - + pow((double)momentCentered.get(3, 0), (double)2) * featureMomentCentered.interaction(2, 1) * + momentCentered.get(0, 3) + + 9 * momentCentered.get(3, 0) * pow((double)momentCentered.get(1, 2), (double)2) * + featureMomentCentered.interaction(1, 2) - + 4 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(1, 2) * + featureMomentCentered.interaction(1, 2) + + 2 * pow((double)momentCentered.get(1, 2), (double)2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(0, 3) - + 4 * momentCentered.get(3, 0) * pow((double)momentCentered.get(1, 2), (double)2) * + featureMomentCentered.interaction(3, 0) + + 2 * momentCentered.get(1, 2) * pow((double)momentCentered.get(0, 3), (double)2) * + featureMomentCentered.interaction(1, 2) - + 4 * momentCentered.get(2, 1) * pow((double)momentCentered.get(0, 3), (double)2) * + featureMomentCentered.interaction(2, 1) + + 3 * momentCentered.get(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * + featureMomentCentered.interaction(1, 2) - + 3 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(1, 2) * + featureMomentCentered.interaction(3, 0) - + momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) * + pow((double)momentCentered.get(0, 
3), (double)2) - + 4 * pow((double)momentCentered.get(2, 1), (double)2) * momentCentered.get(0, 3) * + featureMomentCentered.interaction(0, 3) - + 3 * momentCentered.get(2, 1) * pow((double)momentCentered.get(0, 3), (double)2) * + featureMomentCentered.interaction(0, 3) + + 2 * momentCentered.get(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * + featureMomentCentered.interaction(3, 0) + + 2 * pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(2, 1) + + 3 * featureMomentCentered.interaction(3, 0) * pow((double)momentCentered.get(2, 1), (double)2) * + momentCentered.get(1, 2) - + pow((double)momentCentered.get(3, 0), (double)2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(0, 3) + + 3 * momentCentered.get(2, 1) * pow((double)momentCentered.get(1, 2), (double)2) * + featureMomentCentered.interaction(0, 3) - + featureMomentCentered.interaction(3, 0) * momentCentered.get(1, 2) * + pow((double)momentCentered.get(0, 3), (double)2); LI[9] = 4 * pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(3, 0) + - 18 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(3, 0) + - 12 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(3, 0) + - 18 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + - 4 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + - 18 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(2, 1) * - featureMomentCentered.interaction(3, 0) + - 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(1, 2) * featureMomentCentered.interaction(3, 0) + - 6 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(2, 1) + - 18 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * 
momentCentered.get(3, 0) * - featureMomentCentered.interaction(2, 1) + - 18 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(2, 1) + - 6 * pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(1, 2) + - 18 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) + - 18 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(1, 2) + - 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) + - 6 * momentCentered.get(2, 1) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + - 4 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + - 18 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(0, 3) + - 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * - featureMomentCentered.interaction(0, 3) + - 18 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) + - 18 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3) + - 4 * pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(0, 3); + 18 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(3, 0) + + 12 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(3, 0) + + 18 * pow(momentCentered.get(1, 2), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + + 4 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(3, 0) + + 18 * momentCentered.get(0, 3) * 
momentCentered.get(1, 2) * momentCentered.get(2, 1) * + featureMomentCentered.interaction(3, 0) + + 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(1, 2) * featureMomentCentered.interaction(3, 0) + + 6 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(2, 1) + + 18 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(2, 1) + + 18 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(2, 1) + + 6 * pow(momentCentered.get(3, 0), 3) * featureMomentCentered.interaction(1, 2) + + 18 * momentCentered.get(1, 2) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(1, 2) + + 18 * momentCentered.get(0, 3) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(1, 2) + + 6 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(3, 0) * featureMomentCentered.interaction(1, 2) + + 6 * momentCentered.get(2, 1) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + + 4 * momentCentered.get(0, 3) * pow(momentCentered.get(3, 0), 2) * featureMomentCentered.interaction(0, 3) + + 18 * momentCentered.get(1, 2) * momentCentered.get(2, 1) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(0, 3) + + 12 * momentCentered.get(0, 3) * momentCentered.get(1, 2) * momentCentered.get(3, 0) * + featureMomentCentered.interaction(0, 3) + + 18 * momentCentered.get(0, 3) * pow(momentCentered.get(2, 1), 2) * featureMomentCentered.interaction(0, 3) + + 18 * pow(momentCentered.get(0, 3), 2) * momentCentered.get(2, 1) * featureMomentCentered.interaction(0, 3) + + 4 * pow(momentCentered.get(0, 3), 3) * featureMomentCentered.interaction(0, 3); LI[10] = featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) + - momentCentered.get(4, 0) * 
featureMomentCentered.interaction(0, 4) - - 4 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) - - 4 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) + - 6 * momentCentered.get(2, 2) * featureMomentCentered.interaction(2, 2); + momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) - + 4 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) - + 4 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) + + 6 * momentCentered.get(2, 2) * featureMomentCentered.interaction(2, 2); LI[11] = -3 * featureMomentCentered.interaction(4, 0) * momentCentered.get(2, 2) - - 3 * momentCentered.get(4, 0) * featureMomentCentered.interaction(2, 2) - - 2 * featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) - - 2 * momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) + - 6 * momentCentered.get(3, 1) * featureMomentCentered.interaction(3, 1) + - 2 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) + - 2 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) - - 3 * featureMomentCentered.interaction(2, 2) * momentCentered.get(0, 4) - - 3 * momentCentered.get(2, 2) * featureMomentCentered.interaction(0, 4) + - 6 * momentCentered.get(1, 3) * featureMomentCentered.interaction(1, 3); + 3 * momentCentered.get(4, 0) * featureMomentCentered.interaction(2, 2) - + 2 * featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) - + 2 * momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) + + 6 * momentCentered.get(3, 1) * featureMomentCentered.interaction(3, 1) + + 2 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) + + 2 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) - + 3 * featureMomentCentered.interaction(2, 2) * momentCentered.get(0, 4) - + 3 * momentCentered.get(2, 2) * featureMomentCentered.interaction(0, 4) + + 6 * momentCentered.get(1, 3) * featureMomentCentered.interaction(1, 3); LI[12] 
= 6 * momentCentered.get(4, 0) * featureMomentCentered.interaction(4, 0) + - 12 * featureMomentCentered.interaction(4, 0) * momentCentered.get(2, 2) + - 12 * momentCentered.get(4, 0) * featureMomentCentered.interaction(2, 2) + - 2 * featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) + - 2 * momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) + - 16 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) + - 16 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) + - 12 * featureMomentCentered.interaction(2, 2) * momentCentered.get(0, 4) + - 12 * momentCentered.get(2, 2) * featureMomentCentered.interaction(0, 4) + - 6 * momentCentered.get(0, 4) * featureMomentCentered.interaction(0, 4); + 12 * featureMomentCentered.interaction(4, 0) * momentCentered.get(2, 2) + + 12 * momentCentered.get(4, 0) * featureMomentCentered.interaction(2, 2) + + 2 * featureMomentCentered.interaction(4, 0) * momentCentered.get(0, 4) + + 2 * momentCentered.get(4, 0) * featureMomentCentered.interaction(0, 4) + + 16 * featureMomentCentered.interaction(3, 1) * momentCentered.get(1, 3) + + 16 * momentCentered.get(3, 1) * featureMomentCentered.interaction(1, 3) + + 12 * featureMomentCentered.interaction(2, 2) * momentCentered.get(0, 4) + + 12 * momentCentered.get(2, 2) * featureMomentCentered.interaction(0, 4) + + 6 * momentCentered.get(0, 4) * featureMomentCentered.interaction(0, 4); LI[13] = 2 * (momentCentered.get(5, 0) + 2 * momentCentered.get(3, 2) + momentCentered.get(1, 4)) * - (featureMomentCentered.interaction(5, 0) + 2 * featureMomentCentered.interaction(3, 2) + - featureMomentCentered.interaction(1, 4)) + - 2 * (momentCentered.get(0, 5) + 2 * momentCentered.get(2, 3) + momentCentered.get(4, 1)) * - (featureMomentCentered.interaction(0, 5) + 2 * featureMomentCentered.interaction(2, 3) + - featureMomentCentered.interaction(4, 1)); + (featureMomentCentered.interaction(5, 0) + 2 * featureMomentCentered.interaction(3, 2) + + 
featureMomentCentered.interaction(1, 4)) + + 2 * (momentCentered.get(0, 5) + 2 * momentCentered.get(2, 3) + momentCentered.get(4, 1)) * + (featureMomentCentered.interaction(0, 5) + 2 * featureMomentCentered.interaction(2, 3) + + featureMomentCentered.interaction(4, 1)); LI[14] = 2 * (momentCentered.get(5, 0) - 2 * momentCentered.get(3, 2) - 3 * momentCentered.get(1, 4)) * - (featureMomentCentered.interaction(5, 0) - 2 * featureMomentCentered.interaction(3, 2) - - 3 * featureMomentCentered.interaction(1, 4)) + - 2 * (momentCentered.get(0, 5) - 2 * momentCentered.get(2, 3) - 3 * momentCentered.get(4, 1)) * - (featureMomentCentered.interaction(0, 5) - 2 * featureMomentCentered.interaction(2, 3) - - 3 * featureMomentCentered.interaction(4, 1)); + (featureMomentCentered.interaction(5, 0) - 2 * featureMomentCentered.interaction(3, 2) - + 3 * featureMomentCentered.interaction(1, 4)) + + 2 * (momentCentered.get(0, 5) - 2 * momentCentered.get(2, 3) - 3 * momentCentered.get(4, 1)) * + (featureMomentCentered.interaction(0, 5) - 2 * featureMomentCentered.interaction(2, 3) - + 3 * featureMomentCentered.interaction(4, 1)); LI[15] = 2 * (momentCentered.get(5, 0) - 10 * momentCentered.get(3, 2) + 5 * momentCentered.get(1, 4)) * - (featureMomentCentered.interaction(5, 0) - 10 * featureMomentCentered.interaction(3, 2) + - 5 * featureMomentCentered.interaction(1, 4)) + - 2 * (momentCentered.get(0, 5) - 10 * momentCentered.get(2, 3) + 5 * momentCentered.get(4, 1)) * - (featureMomentCentered.interaction(0, 5) - 10 * featureMomentCentered.interaction(2, 3) + - 5 * featureMomentCentered.interaction(4, 1)); + (featureMomentCentered.interaction(5, 0) - 10 * featureMomentCentered.interaction(3, 2) + + 5 * featureMomentCentered.interaction(1, 4)) + + 2 * (momentCentered.get(0, 5) - 10 * momentCentered.get(2, 3) + 5 * momentCentered.get(4, 1)) * + (featureMomentCentered.interaction(0, 5) - 10 * featureMomentCentered.interaction(2, 3) + + 5 * featureMomentCentered.interaction(4, 1)); double s3 
= momentCInvariant.getS(3); double s2 = momentCInvariant.getS(2); @@ -341,18 +336,18 @@ void vpFeatureMomentCInvariant::compute_interaction() vpMatrix Lc2 = featureMomentCentered.interaction(0, 3) - 3 * featureMomentCentered.interaction(2, 1); vpMatrix Ls2 = featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2); vpMatrix Lc3 = 2 * (momentCentered.get(2, 0) - momentCentered.get(0, 2)) * - (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) - - 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); + (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) - + 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); vpMatrix Ls3 = 4 * featureMomentCentered.interaction(1, 1) * (momentCentered.get(2, 0) - momentCentered.get(0, 2)) + - 4 * momentCentered.get(1, 1) * - (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)); + 4 * momentCentered.get(1, 1) * + (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)); vpMatrix LI1 = 2 * (momentCentered.get(2, 0) - momentCentered.get(0, 2)) * - (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) + - 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); + (featureMomentCentered.interaction(2, 0) - featureMomentCentered.interaction(0, 2)) + + 8 * momentCentered.get(1, 1) * featureMomentCentered.interaction(1, 1); vpMatrix LI2 = 2 * (momentCentered.get(0, 3) - 3 * momentCentered.get(2, 1)) * - (featureMomentCentered.interaction(0, 3) - 3 * featureMomentCentered.interaction(2, 1)) + - 2 * (momentCentered.get(3, 0) - 3 * momentCentered.get(1, 2)) * - (featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2)); + (featureMomentCentered.interaction(0, 3) - 3 * featureMomentCentered.interaction(2, 1)) + + 2 * (momentCentered.get(3, 0) - 3 * momentCentered.get(1, 2)) * + 
(featureMomentCentered.interaction(3, 0) - 3 * featureMomentCentered.interaction(1, 2)); vpMatrix LI3 = featureMomentCentered.interaction(2, 0) + featureMomentCentered.interaction(0, 2); vpMatrix La(1, 6); @@ -360,54 +355,55 @@ void vpFeatureMomentCInvariant::compute_interaction() if (momentObject.getType() == vpMomentObject::DISCRETE) { a = momentCentered.get(2, 0) + momentCentered.get(0, 2); La = (featureMomentCentered.interaction(2, 0) + featureMomentCentered.interaction(0, 2)); - } else { + } + else { a = momentObject.get(0, 0); La = featureMomentBasic.interaction(0, 0); } interaction_matrices.resize(14); interaction_matrices[0] = (1. / (momentCInvariant.getI(2) * momentCInvariant.getI(2))) * - (momentCInvariant.getI(2) * LI[1] - momentCInvariant.getI(1) * LI[2]); + (momentCInvariant.getI(2) * LI[1] - momentCInvariant.getI(1) * LI[2]); interaction_matrices[1] = (1. / (momentCInvariant.getI(4) * momentCInvariant.getI(4))) * - (momentCInvariant.getI(4) * LI[3] - momentCInvariant.getI(3) * LI[4]); + (momentCInvariant.getI(4) * LI[3] - momentCInvariant.getI(3) * LI[4]); interaction_matrices[2] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[5] - momentCInvariant.getI(5) * LI[6]); + (momentCInvariant.getI(6) * LI[5] - momentCInvariant.getI(5) * LI[6]); interaction_matrices[3] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[7] - momentCInvariant.getI(7) * LI[6]); + (momentCInvariant.getI(6) * LI[7] - momentCInvariant.getI(7) * LI[6]); interaction_matrices[4] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[8] - momentCInvariant.getI(8) * LI[6]); + (momentCInvariant.getI(6) * LI[8] - momentCInvariant.getI(8) * LI[6]); interaction_matrices[5] = (1. 
/ (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[9] - momentCInvariant.getI(9) * LI[6]); + (momentCInvariant.getI(6) * LI[9] - momentCInvariant.getI(9) * LI[6]); interaction_matrices[6] = (1. / (momentCInvariant.getI(10) * momentCInvariant.getI(10))) * - (momentCInvariant.getI(10) * LI[11] - momentCInvariant.getI(11) * LI[10]); + (momentCInvariant.getI(10) * LI[11] - momentCInvariant.getI(11) * LI[10]); interaction_matrices[7] = (1. / (momentCInvariant.getI(10) * momentCInvariant.getI(10))) * - (momentCInvariant.getI(10) * LI[12] - momentCInvariant.getI(12) * LI[10]); + (momentCInvariant.getI(10) * LI[12] - momentCInvariant.getI(12) * LI[10]); interaction_matrices[8] = (1. / (momentCInvariant.getI(15) * momentCInvariant.getI(15))) * - (momentCInvariant.getI(15) * LI[13] - momentCInvariant.getI(13) * LI[15]); + (momentCInvariant.getI(15) * LI[13] - momentCInvariant.getI(13) * LI[15]); interaction_matrices[9] = (1. / (momentCInvariant.getI(15) * momentCInvariant.getI(15))) * - (momentCInvariant.getI(15) * LI[14] - momentCInvariant.getI(14) * LI[15]); + (momentCInvariant.getI(15) * LI[14] - momentCInvariant.getI(14) * LI[15]); interaction_matrices[10] = (Lc2 * c3 + c2 * Lc3 + Ls2 * s3 + s2 * Ls3) * sqrt(a) / I1 * pow(I3, -0.3e1 / 0.2e1) + - (c2 * c3 + s2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - - (c2 * c3 + s2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - - 0.3e1 / 0.2e1 * (c2 * c3 + s2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; + (c2 * c3 + s2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - + (c2 * c3 + s2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - + 0.3e1 / 0.2e1 * (c2 * c3 + s2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; interaction_matrices[11] = (Ls2 * c3 + s2 * Lc3 - Lc2 * s3 - c2 * Ls3) * sqrt(a) / I1 * pow(I3, -0.3e1 / 0.2e1) + - (s2 * c3 - c2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * 
pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - - (s2 * c3 - c2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - - 0.3e1 / 0.2e1 * (s2 * c3 - c2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; + (s2 * c3 - c2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - + (s2 * c3 - c2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - + 0.3e1 / 0.2e1 * (s2 * c3 - c2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; interaction_matrices[12] = (1 / (I3 * I3)) * LI1 - (2 * I1 / (I3 * I3 * I3)) * LI3; interaction_matrices[13] = - (I2 / (I3 * I3 * I3)) * La + (a / (I3 * I3 * I3)) * LI2 - (3 * a * I2 / (I3 * I3 * I3 * I3)) * LI3; + (I2 / (I3 * I3 * I3)) * La + (a / (I3 * I3 * I3)) * LI2 - (3 * a * I2 / (I3 * I3 * I3 * I3)) * LI3; } #else @@ -425,17 +421,16 @@ void vpFeatureMomentCInvariant::compute_interaction() #include /*! - Computes interaction matrix for space-scale-rotation invariants. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpFeatureMomentCentered - - vpMomentCInvariant - - vpFeatureMomentBasic -*/ + * Computes interaction matrix for space-scale-rotation invariants. Called + * internally. The moment primitives must be computed before calling this. 
This + * feature depends on: + * - vpMomentCentered + * - vpFeatureMomentCentered + * - vpMomentCInvariant + * - vpFeatureMomentBasic + */ void vpFeatureMomentCInvariant::compute_interaction() { - // std::vector LI(16); LI.resize(16); // LI made class member @@ -446,15 +441,15 @@ void vpFeatureMomentCInvariant::compute_interaction() const vpMomentObject &momentObject = moment->getObject(); const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); const vpMomentCInvariant &momentCInvariant = - (static_cast(moments.get("vpMomentCInvariant", found_moment_cinvariant))); + (static_cast(moments.get("vpMomentCInvariant", found_moment_cinvariant))); vpFeatureMomentCentered &featureMomentCentered = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); + featureMomentsDataBase->get("vpFeatureMomentCentered", found_FeatureMoment_centered))); vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); if (!found_featuremoment_basic) throw vpException(vpException::notInitialized, "vpFeatureMomentBasic not found"); @@ -523,75 +518,75 @@ void vpFeatureMomentCInvariant::compute_interaction() LI[2] = (-2 * mu20 + 2 * mu02) * Lmu02 + 8 * mu11 * Lmu11 + (2 * mu20 - 2 * mu02) * Lmu20; LI[3] = (-6 * mu21 + 2 * mu03) * Lmu03 + (-6 * mu30 + 18 * mu12) * Lmu12 + (18 * mu21 - 6 * mu03) * Lmu21 + - (2 * mu30 - 6 * mu12) * Lmu30; + (2 * mu30 - 6 * mu12) * Lmu30; LI[4] = (2 * mu21 + 2 * mu03) * Lmu03 + (2 * mu30 + 2 * mu12) * Lmu12 + (2 * mu21 + 2 * mu03) * Lmu21 + - (2 * mu30 + 2 * mu12) * Lmu30; + (2 * mu30 + 2 * mu12) * Lmu30; LI[5] = (-2 * mu30_2 * mu03 + 6 * mu30 * mu21 * mu12 - 4 * mu21_3) * Lmu03 + - (6 * mu30 * mu21 * mu03 - 12 * mu30 * mu12_2 + 6 
* mu21_2 * mu12) * Lmu12 + - (6 * mu30 * mu12 * mu03 - 12 * mu21_2 * mu03 + 6 * mu21 * mu12_2) * Lmu21 + - (-2 * mu30 * mu03_2 - 4 * mu12_3 + 6 * mu21 * mu12 * mu03) * Lmu30; + (6 * mu30 * mu21 * mu03 - 12 * mu30 * mu12_2 + 6 * mu21_2 * mu12) * Lmu12 + + (6 * mu30 * mu12 * mu03 - 12 * mu21_2 * mu03 + 6 * mu21 * mu12_2) * Lmu21 + + (-2 * mu30 * mu03_2 - 4 * mu12_3 + 6 * mu21 * mu12 * mu03) * Lmu30; LI[6] = (-6 * mu30 * mu21 * mu12 - 6 * mu21 * mu12_2 + 6 * mu21_2 * mu03 + 2 * mu21_3 + 4 * mu30_2 * mu03) * Lmu03 + - (-6 * mu30 * mu21_2 - 6 * mu30 * mu21 * mu03 + 12 * mu12_3 + 6 * mu30_2 * mu12 - 12 * mu21 * mu12 * mu03 + - 6 * mu30 * mu12_2) * - Lmu12 + - (6 * mu21 * mu03_2 + 6 * mu21_2 * mu03 - 6 * mu30 * mu12 * mu03 + 12 * mu21_3 - 12 * mu30 * mu21 * mu12 - - 6 * mu12_2 * mu03) * - Lmu21 + - (6 * mu30 * mu12_2 + 2 * mu12_3 + 4 * mu30 * mu03_2 - 6 * mu21_2 * mu12 - 6 * mu21 * mu12 * mu03) * Lmu30; + (-6 * mu30 * mu21_2 - 6 * mu30 * mu21 * mu03 + 12 * mu12_3 + 6 * mu30_2 * mu12 - 12 * mu21 * mu12 * mu03 + + 6 * mu30 * mu12_2) * + Lmu12 + + (6 * mu21 * mu03_2 + 6 * mu21_2 * mu03 - 6 * mu30 * mu12 * mu03 + 12 * mu21_3 - 12 * mu30 * mu21 * mu12 - + 6 * mu12_2 * mu03) * + Lmu21 + + (6 * mu30 * mu12_2 + 2 * mu12_3 + 4 * mu30 * mu03_2 - 6 * mu21_2 * mu12 - 6 * mu21 * mu12 * mu03) * Lmu30; LI[7] = (-6 * mu21_2 * mu12 + 3 * mu30 * mu03_2 - mu30_3 - 3 * mu30 * mu21_2 - 6 * mu21 * mu12 * mu03 + 3 * mu30 * mu12_2 + 2 * mu12_3) * - Lmu03 + - (-3 * mu21 * mu03_2 + 12 * mu30 * mu21 * mu12 + 6 * mu30 * mu12 * mu03 + 3 * mu30_2 * mu21 + - 9 * mu21 * mu12_2 - 6 * mu21_2 * mu03 - 3 * mu21_3 + 6 * mu12_2 * mu03) * - Lmu12 + - (3 * mu30_2 * mu12 - 9 * mu21_2 * mu12 - 12 * mu21 * mu12 * mu03 - 6 * mu30 * mu21 * mu03 - - 6 * mu30 * mu21_2 + 6 * mu30 * mu12_2 + 3 * mu12_3 - 3 * mu12 * mu03_2) * - Lmu21 + - (6 * mu21 * mu12_2 + 6 * mu30 * mu21 * mu12 - 3 * mu30_2 * mu03 + 3 * mu12_2 * mu03 - 3 * mu21_2 * mu03 - - 2 * mu21_3 + mu03_3) * - Lmu30; + Lmu03 + + (-3 * mu21 * mu03_2 + 12 * mu30 * 
mu21 * mu12 + 6 * mu30 * mu12 * mu03 + 3 * mu30_2 * mu21 + + 9 * mu21 * mu12_2 - 6 * mu21_2 * mu03 - 3 * mu21_3 + 6 * mu12_2 * mu03) * + Lmu12 + + (3 * mu30_2 * mu12 - 9 * mu21_2 * mu12 - 12 * mu21 * mu12 * mu03 - 6 * mu30 * mu21 * mu03 - + 6 * mu30 * mu21_2 + 6 * mu30 * mu12_2 + 3 * mu12_3 - 3 * mu12 * mu03_2) * + Lmu21 + + (6 * mu21 * mu12_2 + 6 * mu30 * mu21 * mu12 - 3 * mu30_2 * mu03 + 3 * mu12_2 * mu03 - 3 * mu21_2 * mu03 - + 2 * mu21_3 + mu03_3) * + Lmu30; LI[8] = (6 * mu21_3 - 2 * mu30 * mu12 * mu03 + 2 * mu12_2 * mu03 + 3 * mu21 * mu12_2 - 6 * mu30 * mu21 * mu12 - mu30_2 * mu21 - 4 * mu21_2 * mu03 - 3 * mu21 * mu03_2) * - Lmu03 + - (2 * mu12 * mu03_2 - 4 * mu30_2 * mu12 + 9 * mu30 * mu12_2 - mu30 * mu03_2 - 6 * mu30 * mu21 * mu03 + - 3 * mu30 * mu21_2 + 6 * mu21 * mu12 * mu03 - mu30_3) * - Lmu12 + - (18 * mu21_2 * mu03 + 6 * mu30 * mu21 * mu12 - 4 * mu21 * mu03_2 - mu03_3 - mu30_2 * mu03 - - 6 * mu30 * mu12 * mu03 + 3 * mu12_2 * mu03 + 2 * mu30_2 * mu21) * - Lmu21 + - (-6 * mu21 * mu12 * mu03 - 4 * mu30 * mu12_2 - 2 * mu30 * mu21 * mu03 + 2 * mu30 * mu21_2 + 3 * mu12_3 + - 3 * mu21_2 * mu12 - 3 * mu30_2 * mu12 - mu12 * mu03_2) * - Lmu30; + Lmu03 + + (2 * mu12 * mu03_2 - 4 * mu30_2 * mu12 + 9 * mu30 * mu12_2 - mu30 * mu03_2 - 6 * mu30 * mu21 * mu03 + + 3 * mu30 * mu21_2 + 6 * mu21 * mu12 * mu03 - mu30_3) * + Lmu12 + + (18 * mu21_2 * mu03 + 6 * mu30 * mu21 * mu12 - 4 * mu21 * mu03_2 - mu03_3 - mu30_2 * mu03 - + 6 * mu30 * mu12 * mu03 + 3 * mu12_2 * mu03 + 2 * mu30_2 * mu21) * + Lmu21 + + (-6 * mu21 * mu12 * mu03 - 4 * mu30 * mu12_2 - 2 * mu30 * mu21 * mu03 + 2 * mu30 * mu21_2 + 3 * mu12_3 + + 3 * mu21_2 * mu12 - 3 * mu30_2 * mu12 - mu12 * mu03_2) * + Lmu30; LI[9] = (2 * (2 * mu03 + 3 * mu21)) * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu03 + - 6 * mu30 * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu12 + - 6 * mu03 * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu21 + - (2 * (2 * mu30 + 3 * mu12)) * (3 * mu03 * mu21 + 
3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu30; + 6 * mu30 * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu12 + + 6 * mu03 * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu21 + + (2 * (2 * mu30 + 3 * mu12)) * (3 * mu03 * mu21 + 3 * mu30 * mu12 + mu30_2 + mu03_2) * Lmu30; LI[10] = Lmu40 * mu04 + mu40 * Lmu04 - 4 * Lmu31 * mu13 - 4 * mu31 * Lmu13 + 6 * mu22 * Lmu22; LI[11] = (-2 * mu40 - 3 * mu22) * Lmu04 + (2 * mu31 + 6 * mu13) * Lmu13 + (-3 * mu04 - 3 * mu40) * Lmu22 + - (2 * mu13 + 6 * mu31) * Lmu31 + (-3 * mu22 - 2 * mu04) * Lmu40; + (2 * mu13 + 6 * mu31) * Lmu31 + (-3 * mu22 - 2 * mu04) * Lmu40; LI[12] = (2 * mu40 + 12 * mu22 + 6 * mu04) * Lmu04 + 16 * mu31 * Lmu13 + (12 * mu40 + 12 * mu04) * Lmu22 + - 16 * Lmu31 * mu13 + (6 * mu40 + 12 * mu22 + 2 * mu04) * Lmu40; + 16 * Lmu31 * mu13 + (6 * mu40 + 12 * mu22 + 2 * mu04) * Lmu40; LI[13] = (2 * mu05 + 4 * mu23 + 2 * mu41) * Lmu05 + (2 * mu50 + 4 * mu32 + 2 * mu14) * Lmu14 + - (4 * mu05 + 8 * mu23 + 4 * mu41) * Lmu23 + (4 * mu50 + 8 * mu32 + 4 * mu14) * Lmu32 + - (2 * mu05 + 4 * mu23 + 2 * mu41) * Lmu41 + (2 * mu50 + 4 * mu32 + 2 * mu14) * Lmu50; + (4 * mu05 + 8 * mu23 + 4 * mu41) * Lmu23 + (4 * mu50 + 8 * mu32 + 4 * mu14) * Lmu32 + + (2 * mu05 + 4 * mu23 + 2 * mu41) * Lmu41 + (2 * mu50 + 4 * mu32 + 2 * mu14) * Lmu50; LI[14] = (2 * mu05 - 4 * mu23 - 6 * mu41) * Lmu05 + (-6 * mu50 + 12 * mu32 + 18 * mu14) * Lmu14 + - (-4 * mu05 + 8 * mu23 + 12 * mu41) * Lmu23 + (-4 * mu50 + 8 * mu32 + 12 * mu14) * Lmu32 + - (-6 * mu05 + 12 * mu23 + 18 * mu41) * Lmu41 + (2 * mu50 - 4 * mu32 - 6 * mu14) * Lmu50; + (-4 * mu05 + 8 * mu23 + 12 * mu41) * Lmu23 + (-4 * mu50 + 8 * mu32 + 12 * mu14) * Lmu32 + + (-6 * mu05 + 12 * mu23 + 18 * mu41) * Lmu41 + (2 * mu50 - 4 * mu32 - 6 * mu14) * Lmu50; LI[15] = (2 * mu05 - 20 * mu23 + 10 * mu41) * Lmu05 + (10 * mu50 - 100 * mu32 + 50 * mu14) * Lmu14 + - (-20 * mu05 + 200 * mu23 - 100 * mu41) * Lmu23 + (-20 * mu50 + 200 * mu32 - 100 * mu14) * Lmu32 + - (10 * mu05 - 100 * mu23 + 
50 * mu41) * Lmu41 + (2 * mu50 - 20 * mu32 + 10 * mu14) * Lmu50; + (-20 * mu05 + 200 * mu23 - 100 * mu41) * Lmu23 + (-20 * mu50 + 200 * mu32 - 100 * mu14) * Lmu32 + + (10 * mu05 - 100 * mu23 + 50 * mu41) * Lmu41 + (2 * mu50 - 20 * mu32 + 10 * mu14) * Lmu50; double s3 = momentCInvariant.getS(3); double s2 = momentCInvariant.getS(2); @@ -607,9 +602,9 @@ void vpFeatureMomentCInvariant::compute_interaction() vpMatrix Lc2 = Lmu03 - 3 * Lmu21; vpMatrix Ls2 = Lmu30 - 3 * Lmu12; vpMatrix Lc3 = 2 * (mu20__mu02) * (Lmu20__Lmu02)-8. * mu11 * Lmu11; - vpMatrix Ls3 = 4 * Lmu11 * (mu20__mu02) + 4 * mu11 * (Lmu20__Lmu02); - vpMatrix LI1 = 2 * (mu20__mu02) * (Lmu20__Lmu02) + 8 * mu11 * Lmu11; - vpMatrix LI2 = 2 * (mu03 - 3 * mu21) * (Lc2) + 2 * (mu30 - 3 * mu12) * (Ls2); + vpMatrix Ls3 = 4 * Lmu11 * (mu20__mu02)+4 * mu11 * (Lmu20__Lmu02); + vpMatrix LI1 = 2 * (mu20__mu02) * (Lmu20__Lmu02)+8 * mu11 * Lmu11; + vpMatrix LI2 = 2 * (mu03 - 3 * mu21) * (Lc2)+2 * (mu30 - 3 * mu12) * (Ls2); vpMatrix LI3 = Lmu20 + Lmu02; vpMatrix La(1, 6); @@ -617,7 +612,8 @@ void vpFeatureMomentCInvariant::compute_interaction() if (momentObject.getType() == vpMomentObject::DISCRETE) { a = momentCentered.get(2, 0) + momentCentered.get(0, 2); La = (featureMomentCentered.interaction(2, 0) + featureMomentCentered.interaction(0, 2)); - } else { + } + else { a = momentObject.get(0, 0); La = featureMomentBasic.interaction(0, 0); } @@ -630,55 +626,55 @@ void vpFeatureMomentCInvariant::compute_interaction() */ interaction_matrices[0] = (1. / (momentCInvariant.getI(2) * momentCInvariant.getI(2))) * - (momentCInvariant.getI(2) * LI[1] - momentCInvariant.getI(1) * LI[2]); + (momentCInvariant.getI(2) * LI[1] - momentCInvariant.getI(1) * LI[2]); interaction_matrices[1] = (1. / (momentCInvariant.getI(4) * momentCInvariant.getI(4))) * - (momentCInvariant.getI(4) * LI[3] - momentCInvariant.getI(3) * LI[4]); + (momentCInvariant.getI(4) * LI[3] - momentCInvariant.getI(3) * LI[4]); interaction_matrices[2] = (1. 
/ (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[5] - momentCInvariant.getI(5) * LI[6]); + (momentCInvariant.getI(6) * LI[5] - momentCInvariant.getI(5) * LI[6]); interaction_matrices[3] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[7] - momentCInvariant.getI(7) * LI[6]); + (momentCInvariant.getI(6) * LI[7] - momentCInvariant.getI(7) * LI[6]); interaction_matrices[4] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[8] - momentCInvariant.getI(8) * LI[6]); + (momentCInvariant.getI(6) * LI[8] - momentCInvariant.getI(8) * LI[6]); interaction_matrices[5] = (1. / (momentCInvariant.getI(6) * momentCInvariant.getI(6))) * - (momentCInvariant.getI(6) * LI[9] - momentCInvariant.getI(9) * LI[6]); + (momentCInvariant.getI(6) * LI[9] - momentCInvariant.getI(9) * LI[6]); interaction_matrices[6] = (1. / (momentCInvariant.getI(10) * momentCInvariant.getI(10))) * - (momentCInvariant.getI(10) * LI[11] - momentCInvariant.getI(11) * LI[10]); + (momentCInvariant.getI(10) * LI[11] - momentCInvariant.getI(11) * LI[10]); interaction_matrices[7] = (1. / (momentCInvariant.getI(10) * momentCInvariant.getI(10))) * - (momentCInvariant.getI(10) * LI[12] - momentCInvariant.getI(12) * LI[10]); + (momentCInvariant.getI(10) * LI[12] - momentCInvariant.getI(12) * LI[10]); interaction_matrices[8] = (1. / (momentCInvariant.getI(15) * momentCInvariant.getI(15))) * - (momentCInvariant.getI(15) * LI[13] - momentCInvariant.getI(13) * LI[15]); + (momentCInvariant.getI(15) * LI[13] - momentCInvariant.getI(13) * LI[15]); interaction_matrices[9] = (1. 
/ (momentCInvariant.getI(15) * momentCInvariant.getI(15))) * - (momentCInvariant.getI(15) * LI[14] - momentCInvariant.getI(14) * LI[15]); + (momentCInvariant.getI(15) * LI[14] - momentCInvariant.getI(14) * LI[15]); interaction_matrices[10] = (Lc2 * c3 + c2 * Lc3 + Ls2 * s3 + s2 * Ls3) * sqrt(a) / I1 * pow(I3, -0.3e1 / 0.2e1) + - (c2 * c3 + s2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - - (c2 * c3 + s2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - - 0.3e1 / 0.2e1 * (c2 * c3 + s2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; + (c2 * c3 + s2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - + (c2 * c3 + s2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - + 0.3e1 / 0.2e1 * (c2 * c3 + s2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; interaction_matrices[11] = (Ls2 * c3 + s2 * Lc3 - Lc2 * s3 - c2 * Ls3) * sqrt(a) / I1 * pow(I3, -0.3e1 / 0.2e1) + - (s2 * c3 - c2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - - (s2 * c3 - c2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - - 0.3e1 / 0.2e1 * (s2 * c3 - c2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; + (s2 * c3 - c2 * s3) * pow(a, -0.1e1 / 0.2e1) / I1 * pow(I3, -0.3e1 / 0.2e1) * La / 0.2e1 - + (s2 * c3 - c2 * s3) * sqrt(a) * pow(I1, -0.2e1) * pow(I3, -0.3e1 / 0.2e1) * LI1 - + 0.3e1 / 0.2e1 * (s2 * c3 - c2 * s3) * sqrt(a) / I1 * pow(I3, -0.5e1 / 0.2e1) * LI3; interaction_matrices[12] = (1 / (I3 * I3)) * LI1 - (2 * I1 / (I3 * I3 * I3)) * LI3; interaction_matrices[13] = - (I2 / (I3 * I3 * I3)) * La + (a / (I3 * I3 * I3)) * LI2 - (3 * a * I2 / (I3 * I3 * I3 * I3)) * LI3; + (I2 / (I3 * I3 * I3)) * La + (a / (I3 * I3 * I3)) * LI2 - (3 * a * I2 / (I3 * I3 * I3 * I3)) * LI3; } /*! 
- Print out all invariants that were computed - There are 15 of them, as in [Point-based and region based.ITRO05] - \cite Tahri05z + * Print out all invariants that were computed + * There are 15 of them, as in [Point-based and region based.ITRO05] + * \cite Tahri05z */ void vpFeatureMomentCInvariant::printLsofInvariants(std::ostream &os) const { @@ -690,8 +686,8 @@ void vpFeatureMomentCInvariant::printLsofInvariants(std::ostream &os) const } /*! - \relates vpFeatureMomentCInvariant - Print all the interaction matrices of visual features + * \relates vpFeatureMomentCInvariant + * Print all the interaction matrices of visual features */ std::ostream &operator<<(std::ostream &os, const vpFeatureMomentCInvariant &featcinv) { diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp index 53d50b46fe..7373e2f280 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * Manikandan Bakthavatchalam - *****************************************************************************/ + */ #include @@ -50,26 +45,24 @@ #include /*! - Default constructor - \param moments_ : Database of moment primitives. - \param A_ : First plane coefficient for a plane equation of the following - type Ax+By+C=1/Z. \param B_ : Second plane coefficient for a plane equation - of the following type Ax+By+C=1/Z. \param C_ : Third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z. \param featureMoments : - Database of features. 
-*/ + * Default constructor + * \param moments_ : Database of moment primitives. + * \param A_ : First plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param B_ : Second plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param C_ : Third plane coefficient for a plane equation of the following type Ax+By+C=1/Z. + * \param featureMoments : Database of features. + */ vpFeatureMomentCentered::vpFeatureMomentCentered(vpMomentDatabase &moments_, double A_, double B_, double C_, vpFeatureMomentDatabase *featureMoments) : vpFeatureMoment(moments_, A_, B_, C_, featureMoments), order(0) -{ -} +{ } /*! -Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment -\param select_one : first index (i) -\param select_two : second index (j) -\return Interaction matrix corresponding to the moment -*/ + * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment + * \param select_one : first index (i). + * \param select_two : second index (j). + * \return Interaction matrix corresponding to the moment. + */ vpMatrix vpFeatureMomentCentered::interaction(unsigned int select_one, unsigned int select_two) const { if (select_one + select_two > moment->getObject().getOrder()) @@ -80,9 +73,9 @@ vpMatrix vpFeatureMomentCentered::interaction(unsigned int select_one, unsigned } /*! - * Core function for the interaction matrix computation for moment m_pq - * Given its dependent moment and interaction matrices, computes the - * interaction matrix of centred moments + * Core function for the interaction matrix computation for moment m_pq + * Given its dependent moment and interaction matrices, computes the + * interaction matrix of centred moments. */ vpMatrix vpFeatureMomentCentered::compute_Lmu_pq(const unsigned int &p, const unsigned int &q, const double &xg, const double &yg, const vpMatrix &L_xg, const vpMatrix &L_yg, @@ -131,16 +124,17 @@ vpMatrix vpFeatureMomentCentered::compute_Lmu_pq(const unsigned int &p, const un } /*! 
- Interface to the interaction matrix computation for centered moments. Called -internally. Calls compute_Lmu_pq() for main computation moments (upto order-1) -Dependencies: - Moment classes - - vpMomentBasic - Interaction matrix classes - - vpMomentGravityCenter - - vpFeatureMomentBasic - - vpFeatureMomentGravityCenter -*/ + * Interface to the interaction matrix computation for centered moments. Called + * internally. Calls compute_Lmu_pq() for main computation moments (up to order-1) + * + * Dependencies to moment classes: + * - vpMomentBasic + * + * Dependencies to interaction matrix classes: + * - vpMomentGravityCenter + * - vpFeatureMomentBasic + * - vpFeatureMomentGravityCenter + */ void vpFeatureMomentCentered::compute_interaction() { #ifdef VISP_MOMENTS_COMBINE_MATRICES @@ -152,7 +146,7 @@ void vpFeatureMomentCentered::compute_interaction() bool found_moment_gravity; const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); if (!found_moment_gravity) throw vpException(vpException::notInitialized, "vpMomentGravityCenter not found"); double xg = momentGravity.get()[0]; @@ -160,7 +154,7 @@ bool found_feature_gravity_center; vpFeatureMomentGravityCenter &featureMomentGravityCenter = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentGravityCenter", found_feature_gravity_center))); + featureMomentsDataBase->get("vpFeatureMomentGravityCenter", found_feature_gravity_center))); if (!found_feature_gravity_center) throw vpException(vpException::notInitialized, "vpFeatureMomentGravityCenter not found"); vpMatrix Lxg = featureMomentGravityCenter.interaction(1 << 0); @@ -168,13 +162,13 @@ bool found_moment_basic; const vpMomentBasic &momentbasic = - static_cast(moments.get("vpMomentBasic", found_moment_basic)); + 
static_cast(moments.get("vpMomentBasic", found_moment_basic)); if (!found_moment_basic) throw vpException(vpException::notInitialized, "vpMomentBasic not found"); bool found_featuremoment_basic; vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); if (!found_featuremoment_basic) throw vpException(vpException::notInitialized, "vpFeatureMomentBasic not found"); @@ -182,7 +176,7 @@ void vpFeatureMomentCentered::compute_interaction() for (int i = 0; i < (int)order - 1; i++) { for (int j = 0; j < (int)order - 1 - i; j++) { interaction_matrices[(unsigned int)j * order + (unsigned int)i] = - compute_Lmu_pq(i, j, xg, yg, Lxg, Lyg, momentbasic, featureMomentBasic); + compute_Lmu_pq(i, j, xg, yg, Lxg, Lyg, momentbasic, featureMomentBasic); } } #else // #ifdef VISP_MOMENTS_COMBINE_MATRICES @@ -190,9 +184,9 @@ void vpFeatureMomentCentered::compute_interaction() bool found_moment_gravity; const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); if (!found_moment_centered) throw vpException(vpException::notInitialized, "vpMomentCentered not found"); @@ -209,7 +203,8 @@ void vpFeatureMomentCentered::compute_interaction() if (momentObject.getType() == vpMomentObject::DISCRETE) { delta = 0; epsilon = 1; - } else { + } + else { delta = 1; epsilon = 4; } @@ -237,7 +232,7 @@ void vpFeatureMomentCentered::compute_interaction() interaction_matrices[0][0][WX] = (3 * delta) * Yg * mu00; interaction_matrices[0][0][WY] = -(3 * delta) * Xg * mu00; interaction_matrices[0][0][VZ] = - -A * 
interaction_matrices[0][0][WY] + B * interaction_matrices[0][0][WX] + (2 * delta) * C * mu00; + -A * interaction_matrices[0][0][WY] + B * interaction_matrices[0][0][WX] + (2 * delta) * C * mu00; interaction_matrices[0][0][WZ] = 0.; for (int i = 1; i < (int)order - 1; i++) { @@ -255,11 +250,11 @@ void vpFeatureMomentCentered::compute_interaction() interaction_matrices[i_][0][VY] = -(delta)*B * mu_i0; interaction_matrices[i_][0][WX] = - (i + 3 * delta) * mu_i1 + (i + 3 * delta) * Yg * mu_i0 + i * Xg * mu_im11 - i * epsilon * n11 * mu_im10; + (i + 3 * delta) * mu_i1 + (i + 3 * delta) * Yg * mu_i0 + i * Xg * mu_im11 - i * epsilon * n11 * mu_im10; interaction_matrices[i_][0][WY] = - -(i + 3 * delta) * mu_ip10 - (2 * i + 3 * delta) * Xg * mu_i0 + i * epsilon * n20 * mu_im10; + -(i + 3 * delta) * mu_ip10 - (2 * i + 3 * delta) * Xg * mu_i0 + i * epsilon * n20 * mu_im10; interaction_matrices[i_][0][VZ] = - -A * interaction_matrices[i_][0][WY] + B * interaction_matrices[i_][0][WX] + (i + 2 * delta) * C * mu_i0; + -A * interaction_matrices[i_][0][WY] + B * interaction_matrices[i_][0][WX] + (i + 2 * delta) * C * mu_i0; interaction_matrices[i_][0][WZ] = i * mu_im11; } @@ -278,11 +273,11 @@ void vpFeatureMomentCentered::compute_interaction() interaction_matrices[j_ * order][0][VY] = -j * A * mu_1jm1 - (j + delta) * B * mu_0j; interaction_matrices[j_ * order][0][WX] = - (j + 3 * delta) * mu_0jp1 + (2 * j + 3 * delta) * Yg * mu_0j - j * epsilon * n02 * mu_0jm1; + (j + 3 * delta) * mu_0jp1 + (2 * j + 3 * delta) * Yg * mu_0j - j * epsilon * n02 * mu_0jm1; interaction_matrices[j_ * order][0][WY] = - -(j + 3 * delta) * mu_1j - (j + 3 * delta) * Xg * mu_0j - j * Yg * mu_1jm1 + j * epsilon * n11 * mu_0jm1; + -(j + 3 * delta) * mu_1j - (j + 3 * delta) * Xg * mu_0j - j * Yg * mu_1jm1 + j * epsilon * n11 * mu_0jm1; interaction_matrices[j_ * order][0][VZ] = -A * interaction_matrices[j_ * order][0][WY] + - B * interaction_matrices[j_ * order][0][WX] + (j + 2 * delta) * C * mu_0j; + B * 
interaction_matrices[j_ * order][0][WX] + (j + 2 * delta) * C * mu_0j; interaction_matrices[j_ * order][0][WZ] = -j * mu_1jm1; } @@ -307,14 +302,14 @@ void vpFeatureMomentCentered::compute_interaction() interaction_matrices[j_ * order + i_][0][VY] = -j * A * mu_ip1jm1 - (j + delta) * B * mu_ij; interaction_matrices[j_ * order + i_][0][WX] = (i + j + 3 * delta) * mu_ijp1 + - (i + 2 * j + 3 * delta) * Yg * mu_ij + i * Xg * mu_im1jp1 - - i * epsilon * n11 * mu_im1j - j * epsilon * n02 * mu_ijm1; + (i + 2 * j + 3 * delta) * Yg * mu_ij + i * Xg * mu_im1jp1 - + i * epsilon * n11 * mu_im1j - j * epsilon * n02 * mu_ijm1; interaction_matrices[j_ * order + i_][0][WY] = -(i + j + 3 * delta) * mu_ip1j - - (2 * i + j + 3 * delta) * Xg * mu_ij - j * Yg * mu_ip1jm1 + - i * epsilon * n20 * mu_im1j + j * epsilon * n11 * mu_ijm1; + (2 * i + j + 3 * delta) * Xg * mu_ij - j * Yg * mu_ip1jm1 + + i * epsilon * n20 * mu_im1j + j * epsilon * n11 * mu_ijm1; interaction_matrices[j_ * order + i_][0][VZ] = -A * interaction_matrices[j_ * order + i_][0][WY] + - B * interaction_matrices[j_ * order + i_][0][WX] + - (i + j + 2 * delta) * C * mu_ij; + B * interaction_matrices[j_ * order + i_][0][WX] + + (i + j + 2 * delta) * C * mu_ij; interaction_matrices[j_ * order + i_][0][WZ] = i * mu_im1jp1 - j * mu_ip1jm1; } } @@ -322,8 +317,8 @@ void vpFeatureMomentCentered::compute_interaction() } /*! 
- \relates vpFeatureMomentCentered - Print all the interaction matrices of visual features + * \relates vpFeatureMomentCentered + * Print all the interaction matrices of visual features */ std::ostream &operator<<(std::ostream &os, const vpFeatureMomentCentered &mu) { diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentCommon.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentCommon.cpp index bbff94b203..3922385be5 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentCommon.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentCommon.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,28 +29,23 @@ * * Description: * Pre-filled pseudo-database used to handle dependencies between common - *moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + * moment features. + */ #include #include /*! 
- Constructor which initializes and links all common features in the database - \param moments : database for moment primitives - \param A : first plane coefficient for a plane equation of the following - type Ax+By+C=1/Z \param B : second plane coefficient for a plane equation of - the following type Ax+By+C=1/Z \param C : third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z -*/ + * Constructor which initializes and links all common features in the database + * \param moments : database for moment primitives + * \param A : first plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param B : second plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param C : third plane coefficient for a plane equation of the following type Ax+By+C=1/Z + */ vpFeatureMomentCommon::vpFeatureMomentCommon(vpMomentDatabase &moments, double A, double B, double C) : featureGravity(moments, A, B, C), featureGravityNormalized(moments, A, B, C), featureAn(moments, A, B, C), - featureCInvariant(moments, A, B, C), featureAlpha(moments, A, B, C), featureCentered(moments, A, B, C), - featureMomentBasic(moments, A, B, C), feature_moment_area(moments, A, B, C) + featureCInvariant(moments, A, B, C), featureAlpha(moments, A, B, C), featureCentered(moments, A, B, C), + featureMomentBasic(moments, A, B, C), feature_moment_area(moments, A, B, C) { featureGravity.linkTo(*this); @@ -65,12 +59,11 @@ vpFeatureMomentCommon::vpFeatureMomentCommon(vpMomentDatabase &moments, double A } /*! 
- Update all moment features in the database with plane coefficients - \param A : first plane coefficient for a plane equation of the following - type Ax+By+C=1/Z \param B : second plane coefficient for a plane equation of - the following type Ax+By+C=1/Z \param C : third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z -*/ + * Update all moment features in the database with plane coefficients + * \param A : first plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param B : second plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param C : third plane coefficient for a plane equation of the following type Ax+By+C=1/Z + */ void vpFeatureMomentCommon::updateAll(double A, double B, double C) { featureMomentBasic.update(A, B, C); diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentDatabase.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentDatabase.cpp index be6c1a5653..35a30a1aaf 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentDatabase.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentDatabase.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Pseudo-database used to handle dependencies between moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -43,24 +38,24 @@ #include /*! - Add a moment and it's corresponding name to the database - \param featureMoment : database for moment features - \param name : the feature's name, usually the string naming it's class. 
Each - name must be unique -*/ + * Add a moment and its corresponding name to the database + * \param featureMoment : database for moment features + * \param name : the feature's name, usually the string naming its class. Each + * name must be unique + */ void vpFeatureMomentDatabase::add(vpFeatureMoment &featureMoment, char *name) { featureMomentsDataBase.insert(std::pair((const char *)name, &featureMoment)); } /*! - Retrieves a moment feature from the database - \param type : the name of the feature, the one specified when using add - \param found : true if the type string is found inside the database, false - otherwise - - \return the moment feature corresponding to the type string -*/ + * Retrieves a moment feature from the database + * \param type : the name of the feature, the one specified when using add + * \param found : true if the type string is found inside the database, false + * otherwise + * + * \return the moment feature corresponding to the type string + */ vpFeatureMoment &vpFeatureMomentDatabase::get(const char *type, bool &found) { std::map::const_iterator it = @@ -71,12 +66,11 @@ vpFeatureMoment &vpFeatureMomentDatabase::get(const char *type, bool &found) } /*!
- Update all moment features in the database with plane coefficients - \param A : first plane coefficient for a plane equation of the following - type Ax+By+C=1/Z \param B : second plane coefficient for a plane equation of - the following type Ax+By+C=1/Z \param C : third plane coefficient for a - plane equation of the following type Ax+By+C=1/Z -*/ + * Update all moment features in the database with plane coefficients + * \param A : first plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param B : second plane coefficient for a plane equation of the following type Ax+By+C=1/Z + * \param C : third plane coefficient for a plane equation of the following type Ax+By+C=1/Z + */ void vpFeatureMomentDatabase::updateAll(double A, double B, double C) { std::map::const_iterator itr; @@ -97,17 +91,3 @@ void vpFeatureMomentDatabase::updateAll(double A, double B, double C) } #endif } - -/* -std::ostream & operator<<(std::ostream & os, const vpFeatureMomentDatabase& -m){ std::map::const_iterator itr; os << -"{"; - - for(itr = m.featureMoments.begin(); itr != m.featureMoments.end(); itr++){ - os << (*itr).first << ": [" << *((*itr).second) << "],"; - } - os << "}"; - - return os; -}*/ diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenter.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenter.cpp index c0c1a7b637..a0fa07da9c 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenter.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenter.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. 
- * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include @@ -49,19 +44,19 @@ #include /*! - Computes interaction matrix for gravity center moment. Called internally. - The moment primitives must be computed before calling this. - This feature depends on: - - vpFeatureMomentBasic - - Minimum vpMomentObject order needed to compute this feature: 2. -*/ + * Computes interaction matrix for gravity center moment. Called internally. + * The moment primitives must be computed before calling this. + * This feature depends on: + * - vpFeatureMomentBasic + * + * Minimum vpMomentObject order needed to compute this feature: 2. + */ void vpFeatureMomentGravityCenter::compute_interaction() { bool found_featuremoment_basic; vpFeatureMomentBasic &featureMomentBasic = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); + featureMomentsDataBase->get("vpFeatureMomentBasic", found_featuremoment_basic))); const vpMomentObject &momentObject = moment->getObject(); if (!found_featuremoment_basic) @@ -71,11 +66,11 @@ interaction_matrices[1].resize(1, 6); interaction_matrices[0] = - featureMomentBasic.interaction(1, 0) / momentObject.get(0, 0) - - momentObject.get(1, 0) * pow(momentObject.get(0, 0), -0.2e1) * featureMomentBasic.interaction(0, 0); + featureMomentBasic.interaction(1, 0) / momentObject.get(0, 0) - + momentObject.get(1, 0) * pow(momentObject.get(0, 0), -0.2e1) * featureMomentBasic.interaction(0, 0); interaction_matrices[1] = - featureMomentBasic.interaction(0, 1) / momentObject.get(0, 0) - - momentObject.get(0, 1) * pow(momentObject.get(0, 0), -0.2e1) * featureMomentBasic.interaction(0, 0); + featureMomentBasic.interaction(0, 1) / momentObject.get(0, 0) - + momentObject.get(0, 1) * pow(momentObject.get(0, 0), -0.2e1) * featureMomentBasic.interaction(0, 0); } #else @@ -90,23 +85,23 @@ void
vpFeatureMomentGravityCenter::compute_interaction() #include /*! - Computes interaction matrix for gravity center moment. Called internally. - The moment primitives must be computed before calling this. - This feature depends on: - - vpMomentCentered - - vpMomentGravityCenter - - Minimum vpMomentObject order needed to compute this feature: 2. -*/ + * Computes interaction matrix for gravity center moment. Called internally. + * The moment primitives must be computed before calling this. + * This feature depends on: + * - vpMomentCentered + * - vpMomentGravityCenter + * + * Minimum vpMomentObject order needed to compute this feature: 2. + */ void vpFeatureMomentGravityCenter::compute_interaction() { bool found_moment_centered; bool found_moment_gravity; const vpMomentCentered &momentCentered = - (static_cast(moments.get("vpMomentCentered", found_moment_centered))); + (static_cast(moments.get("vpMomentCentered", found_moment_centered))); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); const vpMomentObject &momentObject = moment->getObject(); @@ -120,7 +115,8 @@ void vpFeatureMomentGravityCenter::compute_interaction() int epsilon; if (momentObject.getType() == vpMomentObject::DISCRETE) { epsilon = 1; - } else { + } + else { epsilon = 4; } double n11 = momentCentered.get(1, 1) / momentObject.get(0, 0); diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenterNormalized.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenterNormalized.cpp index 97e6530300..55145e01b9 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenterNormalized.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentGravityCenterNormalized.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source 
Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Implementation for all supported moment features. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include @@ -53,14 +48,14 @@ #include /*! - Computes interaction matrix for centered and normalized moment. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpFeatureMomentGravityCenter - - vpMomentGravityCenter - - vpMomentAreaNormalized - - vpFeatureMomentAreaNormalized -*/ + * Computes interaction matrix for centered and normalized moment. Called + * internally. The moment primitives must be computed before calling this. This + * feature depends on: + * - vpFeatureMomentGravityCenter + * - vpMomentGravityCenter + * - vpMomentAreaNormalized + * - vpFeatureMomentAreaNormalized + */ void vpFeatureMomentGravityCenterNormalized::compute_interaction() { bool found_moment_gravity; @@ -72,11 +67,11 @@ void vpFeatureMomentGravityCenterNormalized::compute_interaction() const vpMomentAreaNormalized &momentSurfaceNormalized = static_cast( moments.get("vpMomentAreaNormalized", found_moment_surface_normalized)); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); vpFeatureMomentGravityCenter &featureMomentGravity = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentGravityCenter", found_featuremoment_gravity))); + featureMomentsDataBase->get("vpFeatureMomentGravityCenter", found_featuremoment_gravity))); vpFeatureMomentAreaNormalized featureMomentAreaNormalized = (static_cast( - featureMomentsDataBase->get("vpFeatureMomentAreaNormalized", found_featuremoment_surfacenormalized))); + featureMomentsDataBase->get("vpFeatureMomentAreaNormalized", 
found_featuremoment_surfacenormalized))); if (!found_moment_surface_normalized) throw vpException(vpException::notInitialized, "vpMomentAreaNormalized not found"); @@ -92,9 +87,9 @@ void vpFeatureMomentGravityCenterNormalized::compute_interaction() interaction_matrices[1].resize(1, 6); interaction_matrices[0] = momentGravity.get()[0] * featureMomentAreaNormalized.interaction(1) + - momentSurfaceNormalized.get()[0] * featureMomentGravity.interaction(1); + momentSurfaceNormalized.get()[0] * featureMomentGravity.interaction(1); interaction_matrices[1] = momentGravity.get()[1] * featureMomentAreaNormalized.interaction(1) + - momentSurfaceNormalized.get()[0] * featureMomentGravity.interaction(2); + momentSurfaceNormalized.get()[0] * featureMomentGravity.interaction(2); } #else @@ -110,24 +105,23 @@ void vpFeatureMomentGravityCenterNormalized::compute_interaction() #include /*! - Computes interaction matrix for centered and normalized moment. Called - internally. The moment primitives must be computed before calling this. This - feature depends on: - - vpMomentCentered - - vpMomentAreaNormalized - - vpMomentGravityCenter -*/ + * Computes interaction matrix for centered and normalized moment. Called + * internally. The moment primitives must be computed before calling this. 
This + * feature depends on: + * - vpMomentCentered + * - vpMomentAreaNormalized + * - vpMomentGravityCenter + */ void vpFeatureMomentGravityCenterNormalized::compute_interaction() { - bool found_moment_surface_normalized; bool found_moment_gravity; bool found_moment_centered; const vpMomentCentered &momentCentered = - static_cast(moments.get("vpMomentCentered", found_moment_centered)); + static_cast(moments.get("vpMomentCentered", found_moment_centered)); const vpMomentGravityCenter &momentGravity = - static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); + static_cast(moments.get("vpMomentGravityCenter", found_moment_gravity)); const vpMomentAreaNormalized &momentSurfaceNormalized = static_cast( moments.get("vpMomentAreaNormalized", found_moment_surface_normalized)); @@ -174,19 +168,20 @@ void vpFeatureMomentGravityCenterNormalized::compute_interaction() Xnvy = A * Xn * e11 + n02 * B * Xn / NA; Xnwx = An * e11 * NA + Yn * n10 - Xn * Xg * e11 + Xn * n01 + Xn * n10 * e11 - Xn * e21 + - (-Xn * n03 + (Xn * n01 - Yn * Xg) * n02) / NA; + (-Xn * n03 + (Xn * n01 - Yn * Xg) * n02) / NA; Xnwy = -An * NA + Xn * e12 + Xn * Xg - An + e11 * Xg * Yn - Xn * n01 * e11 - 2 * Xn * n10 + Xn * e30 + n02 * An + - (-Xn * Xg + Xn * n10) * n02 / NA; + (-Xn * Xg + Xn * n10) * n02 / NA; Ynvx = (Yn - n02 * Yn / NA) * A + Yn * e11 * B; Ynvy = (-Xn + e11 * Yn) * A + (-Yn + n02 * Yn / NA) * B - An * C; Ynwx = n02 * An + Yn * n10 * e11 - e11 * Xg * Yn + An - Yn * e21 + Yn * n01 + - (-Yn * n03 + (Yn * n01 - Yn * Yg) * n02) / NA; + (-Yn * n03 + (Yn * n01 - Yn * Yg) * n02) / NA; Ynwy = -An * e11 * NA + Yn * e11 * Yg - Yn * n01 * e11 + Yn * Xg + Yn * e12 + Yn * e30 - Xn * n01 - 2 * Yn * n10 + - (Yn * n10 - Yn * Xg) * n02 / NA; + (Yn * n10 - Yn * Xg) * n02 / NA; - } else { + } + else { Xnvx = -An * C - A * Xn - Yn * B; Xnvy = (0.5) * B * Xn; diff --git a/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp b/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp 
index b88c03e09e..7a724ab5a5 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureSegment.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,11 +29,7 @@ * * Description: * Segment visual feature. - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ #include #include @@ -51,16 +46,13 @@ #include /*! - \file vpFeatureSegment.cpp - \brief class that defines the vpFeatureSegment visual feature -*/ + * \file vpFeatureSegment.cpp + * \brief class that defines the vpFeatureSegment visual feature + */ /*! - - Initialise the memory space requested for segment visual - feature. - -*/ + * Initialise the memory space requested for segment visual feature. + */ void vpFeatureSegment::init() { // feature dimension @@ -76,12 +68,12 @@ void vpFeatureSegment::init() } /*! - Default constructor that builds an empty segment visual feature. - - \param normalized : If true, use normalized features \f${\bf s} = (x_n, y_n, - l_n, \alpha)\f$. If false, use non normalized features \f${\bf s} = (x_c, - y_c, l_c, \alpha)\f$. -*/ + * Default constructor that builds an empty segment visual feature. + * + * \param normalized : If true, use normalized features \f${\bf s} = (x_n, y_n, + * l_n, \alpha)\f$. If false, use non normalized features \f${\bf s} = (x_c, + * y_c, l_c, \alpha)\f$. + */ vpFeatureSegment::vpFeatureSegment(bool normalized) : xc_(0), yc_(0), l_(0), alpha_(0), Z1_(0), Z2_(0), cos_a_(0), sin_a_(0), normalized_(normalized) { @@ -89,104 +81,102 @@ vpFeatureSegment::vpFeatureSegment(bool normalized) } /*! 
- Compute and return the interaction matrix \f$ L \f$ associated to a - subset of the possible features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ - or \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$. - - The interaction matrix of the non normalized feature set is of the following -form: \f[ - {\bf L} = \left[ - \begin{array}{c} - L_{x_c} \\ - L_{y_c} \\ - L_{l} \\ - L_{\alpha} - \end{array} - \right] = - \left[ - \begin{array}{cccccc} - -\lambda_2 & 0 & \lambda_2 x_c - \lambda_1 l \frac{\cos \alpha}{4} & - x_c y_c + l^2 \frac{\cos \alpha \sin \alpha}{4} & - -(1 + {x_{c}}^{2} + l^2 \frac{\cos^2\alpha}{4}) & - y_c \\ - 0 & -\lambda_2 & \lambda_2 y_c - \lambda_1 l \frac{\sin \alpha}{4} & - 1 + {y_{c}}^{2} + l^2 \frac{\sin^2 \alpha}{4} & - -x_c y_c-l^2 \frac{\cos \alpha \sin \alpha}{4} & - -x_c \\ - \lambda_1 \cos \alpha & \lambda_1 \sin \alpha & - \lambda_2 l - \lambda_1 (x_c \cos \alpha + y_c \sin \alpha) & - l (x_c \cos \alpha \sin \alpha + y_c (1 + \sin^2 \alpha)) & - -l (x_c (1 + \cos^2 \alpha)+y_c \cos \alpha \sin \alpha) & - 0 \\ - -\lambda_1 \frac{\sin \alpha}{l} & \lambda_1 \frac{\cos \alpha}{l} & - \lambda_1 \frac{x_c \sin \alpha - y_c \cos \alpha}{l} & - -x_c \sin^2 \alpha + y_c \cos \alpha \sin \alpha & - x_c \cos \alpha \sin \alpha - y_c \cos^2 \alpha & - -1 - \end{array} - \right] - \f] - - with \f$ \lambda_1 = \frac{Z_1 - Z_2}{Z_1 Z_2}\f$ and \f$ \lambda_2 = -\frac{Z_1 + Z_2}{2 Z_1 Z_2}\f$ where \f$Z_i\f$ are the depths of the points. - - - \param select : Selection of a subset of the possible segment features. - - To compute the interaction matrix for all the four - subset features \f$(x_c \f$, \f$ y_c \f$, \f$ l \f$, \f$ \alpha)\f$ or - \f$(x_n \f$, \f$ y_n \f$, \f$ l_n \f$, \f$ \alpha)\f$ use -vpBasicFeature::FEATURE_ALL. In that case the dimension of the interaction -matrix is \f$ [4 \times 6] \f$. - - To compute the interaction matrix for only one of the subset - use one of the following functions: - selectXc(), selectYc(), selectL(), selectAlpha(). 
In that case, the -returned interaction matrix is of dimension \f$ [1 \times 6] \f$ . - - \return The interaction matrix computed from the segment features. - - The code below shows how to compute the interaction matrix associated to - the visual feature \f${\bf s} = (x_c, y_c, l, \alpha)\f$. - \code -#include -#include - -int main() -{ - // Define two 3D points in the object frame - vpPoint p1(.1, .1, 0.), p2(.3, .2, 0.); - - // Define the camera pose wrt the object - vpHomogeneousMatrix cMo (0, 0, 1, 0, 0, 0); // Z=1 meter - // Compute the coordinates of the points in the camera frame - p1.changeFrame(cMo); - p2.changeFrame(cMo); - // Compute the coordinates of the points in the image plane by perspective projection - p1.project(); p2.project(); - - // Build the segment visual feature - vpFeatureSegment s; - s.buildFrom(p1.get_x(), p1.get_y(), p1.get_Z(), p2.get_x(), p2.get_y(), p2.get_Z()); - - // Compute the interaction matrix - vpMatrix L = s.interaction( vpBasicFeature::FEATURE_ALL ); -} - \endcode - - In this case, L is a 4 by 6 matrix. - - It is also possible to build the interaction matrix associated to - one of the possible features. The code below shows how to modify the -previous code to consider as visual feature \f$s = (l, \alpha)\f$. - - \code - vpMatrix L = s.interaction( vpFeatureSegment::selectL() | vpFeatureSegment::selectAlpha() ); - \endcode - - In that case, L is a 2 by 6 matrix. -*/ + * Compute and return the interaction matrix \f$ L \f$ associated to a + * subset of the possible features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ + * or \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$. 
+ * + * The interaction matrix of the non normalized feature set is of the following + * form: \f[ + * {\bf L} = \left[ + * \begin{array}{c} + * L_{x_c} \\ + * L_{y_c} \\ + * L_{l} \\ + * L_{\alpha} + * \end{array} + * \right] = + * \left[ + * \begin{array}{cccccc} + * -\lambda_2 & 0 & \lambda_2 x_c - \lambda_1 l \frac{\cos \alpha}{4} & + * x_c y_c + l^2 \frac{\cos \alpha \sin \alpha}{4} & + * -(1 + {x_{c}}^{2} + l^2 \frac{\cos^2\alpha}{4}) & + * y_c \\ + * 0 & -\lambda_2 & \lambda_2 y_c - \lambda_1 l \frac{\sin \alpha}{4} & + * 1 + {y_{c}}^{2} + l^2 \frac{\sin^2 \alpha}{4} & + * -x_c y_c-l^2 \frac{\cos \alpha \sin \alpha}{4} & + * -x_c \\ + * \lambda_1 \cos \alpha & \lambda_1 \sin \alpha & + * \lambda_2 l - \lambda_1 (x_c \cos \alpha + y_c \sin \alpha) & + * l (x_c \cos \alpha \sin \alpha + y_c (1 + \sin^2 \alpha)) & + * -l (x_c (1 + \cos^2 \alpha)+y_c \cos \alpha \sin \alpha) & + * 0 \\ + * -\lambda_1 \frac{\sin \alpha}{l} & \lambda_1 \frac{\cos \alpha}{l} & + * \lambda_1 \frac{x_c \sin \alpha - y_c \cos \alpha}{l} & + * -x_c \sin^2 \alpha + y_c \cos \alpha \sin \alpha & + * x_c \cos \alpha \sin \alpha - y_c \cos^2 \alpha & + * -1 + * \end{array} + * \right] + * \f] + * + * with \f$ \lambda_1 = \frac{Z_1 - Z_2}{Z_1 Z_2}\f$ and \f$ \lambda_2 = + * \frac{Z_1 + Z_2}{2 Z_1 Z_2}\f$ where \f$Z_i\f$ are the depths of the points. + * + * \param select : Selection of a subset of the possible segment features. + * - To compute the interaction matrix for all the four + * subset features \f$(x_c \f$, \f$ y_c \f$, \f$ l \f$, \f$ \alpha)\f$ or + * \f$(x_n \f$, \f$ y_n \f$, \f$ l_n \f$, \f$ \alpha)\f$ use + * vpBasicFeature::FEATURE_ALL. In that case the dimension of the interaction + * matrix is \f$ [4 \times 6] \f$. + * - To compute the interaction matrix for only one of the subset + * use one of the following functions: + * selectXc(), selectYc(), selectL(), selectAlpha(). In that case, the + * returned interaction matrix is of dimension \f$ [1 \times 6] \f$ . 
+ * + * \return The interaction matrix computed from the segment features. + * + * The code below shows how to compute the interaction matrix associated to + * the visual feature \f${\bf s} = (x_c, y_c, l, \alpha)\f$. + * \code + * #include + * #include + * + * int main() + * { + * // Define two 3D points in the object frame + * vpPoint p1(.1, .1, 0.), p2(.3, .2, 0.); + * + * // Define the camera pose wrt the object + * vpHomogeneousMatrix cMo (0, 0, 1, 0, 0, 0); // Z=1 meter + * // Compute the coordinates of the points in the camera frame + * p1.changeFrame(cMo); + * p2.changeFrame(cMo); + * // Compute the coordinates of the points in the image plane by perspective projection + * p1.project(); p2.project(); + * + * // Build the segment visual feature + * vpFeatureSegment s; + * s.buildFrom(p1.get_x(), p1.get_y(), p1.get_Z(), p2.get_x(), p2.get_y(), p2.get_Z()); + * + * // Compute the interaction matrix + * vpMatrix L = s.interaction( vpBasicFeature::FEATURE_ALL ); + * } + * \endcode + * + * In this case, L is a 4 by 6 matrix. + * + * It is also possible to build the interaction matrix associated to + * one of the possible features. The code below shows how to modify the + * previous code to consider as visual feature \f$s = (l, \alpha)\f$. + * + * \code + * vpMatrix L = s.interaction( vpFeatureSegment::selectL() | vpFeatureSegment::selectAlpha() ); + * \endcode + * + * In that case, L is a 2 by 6 matrix. + */ vpMatrix vpFeatureSegment::interaction(unsigned int select) { - vpMatrix L; L.resize(0, 6); @@ -286,7 +276,8 @@ vpMatrix vpFeatureSegment::interaction(unsigned int select) Lalpha[0][5] = -1; L = vpMatrix::stack(L, Lalpha); } - } else { + } + else { if (vpFeatureSegment::selectXc() & select) { vpMatrix Lxc(1, 6); Lxc[0][0] = -lambda2; @@ -335,30 +326,29 @@ vpMatrix vpFeatureSegment::interaction(unsigned int select) } /*! 
- Computes the error between the current and the desired visual features from - a subset of the possible features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ or - \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$. - - For the angular component \f$\alpha\f$, we define the error as - \f$\alpha \ominus \alpha^*\f$, where \f$\ominus\f$ is modulo \f$2\pi\f$ - subtraction. - - \param s_star : Desired 2D segment feature. - - \param select : The error can be computed for a selection of a - subset of the possible segment features. - - To compute the error for all the four parameters use - vpBasicFeature::FEATURE_ALL. In that case the error vector is a 4 - dimension column vector. - - To compute the error for only one subfeature of - \f${\bf s} = (x_c, y_c, l, \alpha)\f$ or \f${\bf s} = (x_n, y_n, l_n, - \alpha)\f$ feature set use one of the following functions: selectXc(), - selectYc(), selectL(), selectAlpha(). - - \return The error between the current and the desired - visual feature. - -*/ + * Computes the error between the current and the desired visual features from + * a subset of the possible features \f${\bf s} = (x_c, y_c, l, \alpha)\f$ or + * \f${\bf s} = (x_n, y_n, l_n, \alpha)\f$. + * + * For the angular component \f$\alpha\f$, we define the error as + * \f$\alpha \ominus \alpha^*\f$, where \f$\ominus\f$ is modulo \f$2\pi\f$ + * subtraction. + * + * \param s_star : Desired 2D segment feature. + * + * \param select : The error can be computed for a selection of a + * subset of the possible segment features. + * - To compute the error for all the four parameters use + * vpBasicFeature::FEATURE_ALL. In that case the error vector is a 4 + * dimension column vector. + * - To compute the error for only one subfeature of + * \f${\bf s} = (x_c, y_c, l, \alpha)\f$ or \f${\bf s} = (x_n, y_n, l_n, + * \alpha)\f$ feature set use one of the following functions: selectXc(), + * selectYc(), selectL(), selectAlpha(). 
+ * + * \return The error between the current and the desired + * visual feature. + */ vpColVector vpFeatureSegment::error(const vpBasicFeature &s_star, unsigned int select) { vpColVector e(0); @@ -394,32 +384,32 @@ vpColVector vpFeatureSegment::error(const vpBasicFeature &s_star, unsigned int s } /*! - Print to stdout the values of the current visual feature \f$ s \f$. - - \param select : Selection of a subset of the possible segement features (\f$ - x_c \f$,\f$ y_c \f$,\f$ l \f$,\f$ \alpha \f$). - - \code - s.print(); - \endcode - - produces the following output: - - \code - vpFeatureSegment: (xc = -0.255634; yc = -0.13311; l = 0.105005; alpha = 92.1305 deg) - \endcode - - while - \code - s.print( vpFeatureSegment::selectL() | vpFeatureSegment::selectAlpha() ); - \endcode - - produces the following output: - - \code - vpFeatureSegment: (l = 0.105005; alpha = 92.1305 deg) - \endcode -*/ + * Print to stdout the values of the current visual feature \f$ s \f$. + * + * \param select : Selection of a subset of the possible segment features (\f$ + * x_c \f$,\f$ y_c \f$,\f$ l \f$,\f$ \alpha \f$). + * + * \code + * s.print(); + * \endcode + * + * produces the following output: + * + * \code + * vpFeatureSegment: (xc = -0.255634; yc = -0.13311; l = 0.105005; alpha = 92.1305 deg) + * \endcode + * + * while + * \code + * s.print( vpFeatureSegment::selectL() | vpFeatureSegment::selectAlpha() ); + * \endcode + * + * produces the following output: + * + * \code + * vpFeatureSegment: (l = 0.105005; alpha = 92.1305 deg) + * \endcode + */ void vpFeatureSegment::print(unsigned int select) const { std::cout << "vpFeatureSegment: ("; @@ -451,15 +441,14 @@ void vpFeatureSegment::print(unsigned int select) const } /*! - Create an object with the same type. - - \code - vpBasicFeature *s_star; - vpFeatureSegment s; - s_star = s.duplicate(); // s_star is now a vpFeatureSegment - \endcode - -*/ + * Create an object with the same type. 
+ * + * \code + * vpBasicFeature *s_star; + * vpFeatureSegment s; + * s_star = s.duplicate(); // s_star is now a vpFeatureSegment + * \endcode + */ vpFeatureSegment *vpFeatureSegment::duplicate() const { vpFeatureSegment *feature; @@ -469,16 +458,14 @@ vpFeatureSegment *vpFeatureSegment::duplicate() const } /*! - - Displays a segment representing the feature on a greyscale image. - The two limiting points are displayed in cyan and yellow. - - \param cam : Camera parameters. - \param I : Image. - \param color : Color to use for the segment. - \param thickness : Thickness of the feature representation. - -*/ + * Displays a segment representing the feature on a grayscale image. + * The two limiting points are displayed in cyan and yellow. + * + * \param cam : Camera parameters. + * \param I : Image. + * \param color : Color to use for the segment. + * \param thickness : Thickness of the feature representation. + */ void vpFeatureSegment::display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color, unsigned int thickness) const { @@ -487,7 +474,8 @@ void vpFeatureSegment::display(const vpCameraParameters &cam, const vpImage &I, const vpColor &color, unsigned int thickness) const { @@ -525,7 +512,8 @@ void vpFeatureSegment::display(const vpCameraParameters &cam, const vpImage -/*! - \file vpGenericFeature.cpp - Class that defines what is a generic feature. This class could be used to - create new features not implemented in ViSP. -*/ - -vpGenericFeature::~vpGenericFeature() {} - void vpGenericFeature::init() { s = 0; } /*! 
@@ -102,7 +94,7 @@ void vpGenericFeature::setError(const vpColVector &error_vector) vpERROR_TRACE("size mismatch between error dimension" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between error dimension" - "and feature dimension")); + "and feature dimension")); } errorStatus = errorInitialized; err = error_vector; @@ -169,7 +161,7 @@ vpColVector vpGenericFeature::error(const vpBasicFeature &s_star, unsigned int s vpERROR_TRACE("size mismatch between s* dimension " "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between s* dimension " - "and feature dimension")); + "and feature dimension")); } vpColVector e(0); @@ -181,7 +173,8 @@ vpColVector vpGenericFeature::error(const vpBasicFeature &s_star, unsigned int s "in you visual servoing loop"); throw(vpFeatureException(vpFeatureException::badErrorVectorError, "Error has no been updated since last iteration")); - } else if (errorStatus == errorInitialized) { + } + else if (errorStatus == errorInitialized) { vpDEBUG_TRACE(25, "Error init: e=e."); errorStatus = errorHasToBeUpdated; for (unsigned int i = 0; i < dim_s; i++) @@ -191,7 +184,8 @@ vpColVector vpGenericFeature::error(const vpBasicFeature &s_star, unsigned int s e = vpColVector::stack(e, ex); } - } else { + } + else { vpDEBUG_TRACE(25, "Error not init: e=s-s*."); for (unsigned int i = 0; i < dim_s; i++) @@ -202,7 +196,8 @@ vpColVector vpGenericFeature::error(const vpBasicFeature &s_star, unsigned int s e = vpColVector::stack(e, ex); } } - } catch (...) { + } + catch (...) 
{ throw; } return e; @@ -259,7 +254,8 @@ vpColVector vpGenericFeature::error(unsigned int select) "in you visual servoing loop"); throw(vpFeatureException(vpFeatureException::badErrorVectorError, "Error has no been updated since last iteration")); - } else if (errorStatus == errorInitialized) { + } + else if (errorStatus == errorInitialized) { errorStatus = errorHasToBeUpdated; for (unsigned int i = 0; i < dim_s; i++) if (FEATURE_LINE[i] & select) { @@ -268,7 +264,8 @@ vpColVector vpGenericFeature::error(unsigned int select) e = vpColVector::stack(e, ex); } - } else { + } + else { for (unsigned int i = 0; i < dim_s; i++) if (FEATURE_LINE[i] & select) { @@ -278,7 +275,8 @@ vpColVector vpGenericFeature::error(unsigned int select) e = vpColVector::stack(e, ex); } } - } catch (...) { + } + catch (...) { throw; } @@ -345,7 +343,7 @@ vpMatrix vpGenericFeature::interaction(unsigned int select) std::cout << "with Ls=s* (default) or vice versa" << std::endl; throw(vpFeatureException(vpFeatureException::notInitializedError, "size mismatch between s* dimension " - "and feature dimension")); + "and feature dimension")); } vpMatrix Ls; @@ -382,7 +380,7 @@ void vpGenericFeature::setInteractionMatrix(const vpMatrix &L_) vpERROR_TRACE("size mismatch between interaction matrix size " "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between interaction matrix size " - "and feature dimension")); + "and feature dimension")); } this->L = L_; @@ -405,7 +403,7 @@ void vpGenericFeature::set_s(const vpColVector &s_vector) vpERROR_TRACE("size mismatch between s dimension" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between s dimension" - "and feature dimension")); + "and feature dimension")); } this->s = s_vector; } @@ -426,7 +424,7 @@ void vpGenericFeature::get_s(vpColVector &s_vector) const vpERROR_TRACE("size mismatch between s dimension" "and feature dimension"); 
throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between s dimension" - "and feature dimension")); + "and feature dimension")); } s_vector = this->s; } @@ -452,7 +450,7 @@ void vpGenericFeature::set_s(double s0, double s1, double s2) vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s[0] = s0; s[1] = s1; @@ -480,7 +478,7 @@ void vpGenericFeature::get_s(double &s0, double &s1, double &s2) const vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s0 = s[0]; s1 = s[1]; @@ -505,7 +503,7 @@ void vpGenericFeature::set_s(double s0, double s1) vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s[0] = s0; s[1] = s1; @@ -529,7 +527,7 @@ void vpGenericFeature::get_s(double &s0, double &s1) const vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s0 = s[0]; s1 = s[1]; @@ -551,7 +549,7 @@ void vpGenericFeature::set_s(double s0) vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s[0] = s0; } @@ -572,7 +570,7 @@ void vpGenericFeature::get_s(double &s0) 
const vpERROR_TRACE("size mismatch between number of parameters" "and feature dimension"); throw(vpFeatureException(vpFeatureException::sizeMismatchError, "size mismatch between number of parameters" - "and feature dimension")); + "and feature dimension")); } s0 = s[0]; } From 7dd377cb2e7236d940b80b507c623c8386d0cc26 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Tue, 24 Oct 2023 21:58:49 +0200 Subject: [PATCH 03/14] Fix previous commit when OpenCV is used --- .../include/visp3/mbt/vpMbDepthDenseTracker.h | 6 +- .../visp3/mbt/vpMbDepthNormalTracker.h | 6 +- .../include/visp3/mbt/vpMbEdgeKltTracker.h | 363 ++++++++------- .../include/visp3/mbt/vpMbGenericTracker.h | 308 ++++++------- .../mbt/include/visp3/mbt/vpMbKltTracker.h | 425 +++++++++--------- 5 files changed, 552 insertions(+), 556 deletions(-) diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h index b42f849550..0dd98709d4 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthDenseTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Model-based tracker using depth dense features. 
- * -*****************************************************************************/ + */ #ifndef _vpMbDepthDenseTracker_h_ #define _vpMbDepthDenseTracker_h_ diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h index 72678f823e..c48d2fd5f4 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbDepthNormalTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,8 +29,7 @@ * * Description: * Model-based tracker using depth normal features. - * -*****************************************************************************/ + */ #ifndef _vpMbDepthNormalTracker_h_ #define _vpMbDepthNormalTracker_h_ diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h index 73152356c9..6d6a6be8fd 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbEdgeKltTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,13 +29,12 @@ * * Description: * Hybrid tracker based on edges (vpMbt) and points of interests (KLT) - * -*****************************************************************************/ + */ /*! - \file vpMbEdgeKltTracker.h - \brief Hybrid tracker based on edges (vpMbt) and points of interests (KLT) -*/ + * \file vpMbEdgeKltTracker.h + * \brief Hybrid tracker based on edges (vpMbt) and points of interests (KLT) + */ #ifndef _vpMbEdgeKltTracker_h_ #define _vpMbEdgeKltTracker_h_ @@ -55,156 +53,156 @@ #include /*! 
- \class vpMbEdgeKltTracker - \ingroup group_mbt_trackers - \warning This class is deprecated for user usage. You should rather use the high level - vpMbGenericTracker class. - \warning This class is only available if OpenCV is installed, and used. - - \brief Hybrid tracker based on moving-edges and keypoints tracked using KLT - tracker. - - The \ref tutorial-tracking-mb-deprecated is a good starting point to use this class. - - The tracker requires the knowledge of the 3D model that could be provided in - a vrml or in a cao file. The cao format is described in loadCAOModel(). It may - also use an xml file used to tune the behavior of the tracker and an init file - used to compute the pose at the very first image. - - The following code shows the simplest way to use the tracker. The \ref - tutorial-tracking-mb-deprecated is also a good starting point to use this class. - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose computed using the tracker. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Hybrid Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - // Load the 3d model in cao format. No 3rd party library is required - tracker.loadModel("cube.cao"); - // Get the camera parameters used by the tracker (from the configuration file). - tracker.getCameraParameters(cam); - // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the - // cube.init file. 
- tracker.initClick(I, "cube.init"); - - while(true){ - // Acquire a new image - vpDisplay::display(I); - tracker.track(I); // Track the object on this image - tracker.getPose(cMo); // Get the pose - - tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -#endif -} -\endcode - - The tracker can also be used without display, in that case the initial pose - must be known (object always at the same initial pose for example) or -computed using another method: - -\code -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. - - //acquire an image - vpImageIo::read(I, "cube.pgm"); // Example of acquisition - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - tracker.initFromPose(I, cMo); // initialise the tracker with the given pose. - - while(true){ - // acquire a new image - tracker.track(I); // track the object on this image - tracker.getPose(cMo); // get the pose - } - - return 0; -#endif -} -\endcode - - Finally it can be used not to track an object but just to display a model at -a given pose: - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used to display the model. 
- vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Hybrid Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - - while(true){ - // acquire a new image - // Get the pose using any method - vpDisplay::display(I); - tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. - vpDisplay::flush(I); - } - -#endif - - return 0; -} -\endcode -*/ + * \class vpMbEdgeKltTracker + * \ingroup group_mbt_trackers + * \warning This class is deprecated for user usage. You should rather use the high level + * vpMbGenericTracker class. + * \warning This class is only available if OpenCV is installed, and used. + * + * \brief Hybrid tracker based on moving-edges and keypoints tracked using KLT + * tracker. + * + * The \ref tutorial-tracking-mb-deprecated is a good starting point to use this class. + * + * The tracker requires the knowledge of the 3D model that could be provided in + * a vrml or in a cao file. The cao format is described in loadCAOModel(). It may + * also use an xml file used to tune the behavior of the tracker and an init file + * used to compute the pose at the very first image. + * + * The following code shows the simplest way to use the tracker. The \ref + * tutorial-tracking-mb-deprecated is also a good starting point to use this class. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. 
+ * vpImage I; + * vpHomogeneousMatrix cMo; // Pose computed using the tracker. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Hybrid Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * // Load the 3d model in cao format. No 3rd party library is required + * tracker.loadModel("cube.cao"); + * // Get the camera parameters used by the tracker (from the configuration file). + * tracker.getCameraParameters(cam); + * // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the + * // cube.init file. + * tracker.initClick(I, "cube.init"); + * + * while(true){ + * // Acquire a new image + * vpDisplay::display(I); + * tracker.track(I); // Track the object on this image + * tracker.getPose(cMo); // Get the pose + * + * tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. + * vpDisplay::flush(I); + * } + * + * return 0; + * #endif + * } + * \endcode + * + * The tracker can also be used without display, in that case the initial pose + * must be known (object always at the same initial pose for example) or + * computed using another method: + * + * \code + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. + * + * //acquire an image + * vpImageIo::read(I, "cube.pgm"); // Example of acquisition + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. 
+ * tracker.loadModel("cube.cao"); + * tracker.initFromPose(I, cMo); // initialise the tracker with the given pose. + * + * while(true){ + * // acquire a new image + * tracker.track(I); // track the object on this image + * tracker.getPose(cMo); // get the pose + * } + * + * return 0; + * #endif + * } + * \endcode + * + * Finally it can be used not to track an object but just to display a model at + * a given pose: + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used to display the model. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Hybrid Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * + * while(true){ + * // acquire a new image + * // Get the pose using any method + * vpDisplay::display(I); + * tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. + * vpDisplay::flush(I); + * } + * + * #endif + * + * return 0; + * } + * \endcode + */ class VISP_EXPORT vpMbEdgeKltTracker : #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) public vpMbKltTracker, @@ -246,11 +244,11 @@ class VISP_EXPORT vpMbEdgeKltTracker : virtual inline vpColVector getRobustWeights() const override { return m_w_hybrid; } /*! - Get the near distance for clipping. - - \return Near clipping value. 
+ * Get the near distance for clipping. + * + * \return Near clipping value. */ - virtual inline double getNearClippingDistance() const { return vpMbKltTracker::getNearClippingDistance(); } + virtual inline double getNearClippingDistance() const override { return vpMbKltTracker::getNearClippingDistance(); } virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; @@ -261,35 +259,35 @@ class VISP_EXPORT vpMbEdgeKltTracker : virtual void setCameraParameters(const vpCameraParameters &cam) override; /*! - Specify which clipping to use. - - \sa vpMbtPolygonClipping - - \param flags : New clipping flags. + * Specify which clipping to use. + * + * \sa vpMbtPolygonClipping + * + * \param flags : New clipping flags. */ virtual void setClipping(const unsigned int &flags) override { vpMbEdgeTracker::setClipping(flags); } /*! - Set the far distance for clipping. - - \param dist : Far clipping value. + * Set the far distance for clipping. + * + * \param dist : Far clipping value. */ virtual void setFarClippingDistance(const double &dist) override { vpMbEdgeTracker::setFarClippingDistance(dist); } /*! - Set the near distance for clipping. - - \param dist : Near clipping value. + * Set the near distance for clipping. + * + * \param dist : Near clipping value. */ virtual void setNearClippingDistance(const double &dist) override { vpMbEdgeTracker::setNearClippingDistance(dist); } /*! - Use Ogre3D for visibility tests - - \warning This function has to be called before the initialization of the - tracker. - - \param v : True to use it, False otherwise + * Use Ogre3D for visibility tests + * + * \warning This function has to be called before the initialization of the + * tracker. + * + * \param v : True to use it, False otherwise */ virtual void setOgreVisibilityTest(const bool &v) override { @@ -300,10 +298,10 @@ class VISP_EXPORT vpMbEdgeKltTracker : } /*! 
- Use Scanline algorithm for visibility tests - - \param v : True to use it, False otherwise - */ + * Use Scanline algorithm for visibility tests + * + * \param v : True to use it, False otherwise + */ virtual void setScanLineVisibilityTest(const bool &v) override { vpMbEdgeTracker::setScanLineVisibilityTest(v); @@ -312,6 +310,7 @@ class VISP_EXPORT vpMbEdgeKltTracker : virtual void setPose(const vpImage &I, const vpHomogeneousMatrix &cdMo) override; virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; + /*! * Set if the projection error criteria has to be computed. * diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index fd8e63dc8d..ca3b07483b 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,12 +29,12 @@ * * Description: * Generic model-based tracker. - * -*****************************************************************************/ + */ + /*! - \file vpMbGenericTracker.h - \brief Generic model-based tracker -*/ + * \file vpMbGenericTracker.h + *\brief Generic model-based tracker + */ #ifndef _vpMbGenericTracker_h_ #define _vpMbGenericTracker_h_ @@ -51,151 +50,150 @@ #endif /*! - \class vpMbGenericTracker - \ingroup group_mbt_trackers - \brief Real-time 6D object pose tracking using its CAD model. - - The tracker requires the knowledge of the 3D model that could be provided in - a vrml or in a cao file. The cao format is described in loadCAOModel(). It may - also use an xml file used to tune the behavior of the tracker and an init file - used to compute the pose at the very first image. 
- - This class allows tracking an object or a scene given its 3D model. More information in \cite Trinh18a. - A lot of videos can be found on YouTube VispTeam channel. - - \htmlonly - - - - - \endhtmlonly - - The \ref tutorial-tracking-mb-generic is a good starting point to use this - class. If you want to track an object with a stereo camera refer to - \ref tutorial-tracking-mb-generic-stereo. If you want rather use a RGB-D camera and exploit - the depth information, you may see \ref tutorial-tracking-mb-generic-rgbd. - There is also \ref tutorial-detection-object that shows how to initialize the tracker from - an initial pose provided by a detection algorithm. - - JSON serialization - - Since ViSP 3.6.0, if ViSP is build with \ref soft_tool_json 3rd-party we introduce JSON serialization capabilities for vpMbGenericTracker. - The following sample code shows how to save a model-based tracker settings in a file named `mbt.json` - and reload the values from this JSON file. - \code - #include - - int main() - { - #if defined(VISP_HAVE_NLOHMANN_JSON) - std::string filename = "mbt-generic.json"; - { - vpMbGenericTracker mbt; - mbt.saveConfigFile(filename); - } - { - vpMbGenericTracker mbt; - bool verbose = false; - std::cout << "Read model-based tracker settings from " << filename << std::endl; - mbt.loadConfigFile(filename, verbose); - } - #endif - } - \endcode - If you build and execute the sample code, it will produce the following output: - \code{.unparsed} - Read model-based tracker settings from mbt-generic.json - \endcode - - The content of the `mbt.json` file is the following: - \code{.unparsed} - $ cat mbt-generic.json - { - "referenceCameraName": "Camera", - "trackers": { - "Camera": { - "angleAppear": 89.0, - "angleDisappear": 89.0, - "camTref": { - "cols": 4, - "data": [ - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 1.0, - 0.0, - 0.0, - 0.0, - 0.0, - 1.0 - ], - "rows": 4, - "type": "vpHomogeneousMatrix" - }, - "camera": { - "model": 
"perspectiveWithoutDistortion", - "px": 600.0, - "py": 600.0, - "u0": 192.0, - "v0": 144.0 - }, - "clipping": { - "far": 100.0, - "flags": [ - "none" - ], - "near": 0.001 - }, - "display": { - "features": false, - "projectionError": false - }, - "edge": { - "maskSign": 0, - "maskSize": 5, - "minSampleStep": 4.0, - "mu": [ - 0.5, - 0.5 - ], - "nMask": 180, - "ntotalSample": 0, - "pointsToTrack": 500, - "range": 4, - "sampleStep": 10.0, - "strip": 2, - "threshold": 1500.0 - }, - "lod": { - "minLineLengthThresholdGeneral": 50.0, - "minPolygonAreaThresholdGeneral": 2500.0, - "useLod": false - }, - "type": [ - "edge" - ], - "visibilityTest": { - "ogre": false, - "scanline": false - } - } - }, - "version": "1.0" - } - \endcode - -*/ + * \class vpMbGenericTracker + * \ingroup group_mbt_trackers + * \brief Real-time 6D object pose tracking using its CAD model. + * + * The tracker requires the knowledge of the 3D model that could be provided in + * a vrml or in a cao file. The cao format is described in loadCAOModel(). It may + * also use an xml file used to tune the behavior of the tracker and an init file + * used to compute the pose at the very first image. + * + * This class allows tracking an object or a scene given its 3D model. More information in \cite Trinh18a. + * A lot of videos can be found on YouTube VispTeam channel. + * + * \htmlonly + * + * + * + * + * \endhtmlonly + * + * The \ref tutorial-tracking-mb-generic is a good starting point to use this + * class. If you want to track an object with a stereo camera refer to + * \ref tutorial-tracking-mb-generic-stereo. If you want rather use a RGB-D camera and exploit + * the depth information, you may see \ref tutorial-tracking-mb-generic-rgbd. + * There is also \ref tutorial-detection-object that shows how to initialize the tracker from + * an initial pose provided by a detection algorithm. 
+ * + * JSON serialization + * + * Since ViSP 3.6.0, if ViSP is build with \ref soft_tool_json 3rd-party we introduce JSON serialization capabilities for vpMbGenericTracker. + * The following sample code shows how to save a model-based tracker settings in a file named `mbt.json` + * and reload the values from this JSON file. + * \code + * #include + * + * int main() + * { + * #if defined(VISP_HAVE_NLOHMANN_JSON) + * std::string filename = "mbt-generic.json"; + * { + * vpMbGenericTracker mbt; + * mbt.saveConfigFile(filename); + * } + * { + * vpMbGenericTracker mbt; + * bool verbose = false; + * std::cout << "Read model-based tracker settings from " << filename << std::endl; + * mbt.loadConfigFile(filename, verbose); + * } + * #endif + * } + * \endcode + * If you build and execute the sample code, it will produce the following output: + * \code{.unparsed} + * Read model-based tracker settings from mbt-generic.json + * \endcode + * + * The content of the `mbt.json` file is the following: + * \code{.unparsed} + * $ cat mbt-generic.json + * { + * "referenceCameraName": "Camera", + * "trackers": { + * "Camera": { + * "angleAppear": 89.0, + * "angleDisappear": 89.0, + * "camTref": { + * "cols": 4, + * "data": [ + * 1.0, + * 0.0, + * 0.0, + * 0.0, + * 0.0, + * 1.0, + * 0.0, + * 0.0, + * 0.0, + * 0.0, + * 1.0, + * 0.0, + * 0.0, + * 0.0, + * 0.0, + * 1.0 + * ], + * "rows": 4, + * "type": "vpHomogeneousMatrix" + * }, + * "camera": { + * "model": "perspectiveWithoutDistortion", + * "px": 600.0, + * "py": 600.0, + * "u0": 192.0, + * "v0": 144.0 + * }, + * "clipping": { + * "far": 100.0, + * "flags": [ + * "none" + * ], + * "near": 0.001 + * }, + * "display": { + * "features": false, + * "projectionError": false + * }, + * "edge": { + * "maskSign": 0, + * "maskSize": 5, + * "minSampleStep": 4.0, + * "mu": [ + * 0.5, + * 0.5 + * ], + * "nMask": 180, + * "ntotalSample": 0, + * "pointsToTrack": 500, + * "range": 4, + * "sampleStep": 10.0, + * "strip": 2, + * "threshold": 1500.0 + * 
}, + * "lod": { + * "minLineLengthThresholdGeneral": 50.0, + * "minPolygonAreaThresholdGeneral": 2500.0, + * "useLod": false + * }, + * "type": [ + * "edge" + * ], + * "visibilityTest": { + * "ogre": false, + * "scanline": false + * } + * } + * }, + * "version": "1.0" + * } + * \endcode + */ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker { public: @@ -319,16 +317,19 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker * Return the number of depth dense features taken into account in the virtual visual-servoing scheme. */ virtual inline unsigned int getNbFeaturesDepthDense() const { return m_nb_feat_depthDense; } + /*! * Return the number of depth normal features features taken into account in the virtual visual-servoing scheme. */ virtual inline unsigned int getNbFeaturesDepthNormal() const { return m_nb_feat_depthNormal; } + /*! * Return the number of moving-edges features taken into account in the virtual visual-servoing scheme. * * This function is similar to getNbPoints(). */ virtual inline unsigned int getNbFeaturesEdge() const { return m_nb_feat_edge; } + /*! * Return the number of klt keypoints features taken into account in the virtual visual-servoing scheme. */ @@ -849,7 +850,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(vpMbGenericTracker::vpTrackerType, { * @brief Serialize a tracker wrapper's settings into a JSON representation. * \sa from_json for more details on what is serialized * @param j The modified json object. -* @param t The tracker to serialize. +* @param t The tracker to serialize. 
*/ inline void to_json(nlohmann::json &j, const vpMbGenericTracker::TrackerWrapper &t) { @@ -1047,5 +1048,4 @@ inline void from_json(const nlohmann::json &j, vpMbGenericTracker::TrackerWrappe #endif - #endif diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h index 87b99217b2..448f7b4bc3 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,12 +29,12 @@ * * Description: * Model based tracker using only KLT - * -*****************************************************************************/ + */ + /*! - \file vpMbKltTracker.h - \brief Model based tracker using only KLT -*/ + * \file vpMbKltTracker.h + * \brief Model based tracker using only KLT + */ #ifndef _vpMbKltTracker_h_ #define _vpMbKltTracker_h_ @@ -57,152 +56,152 @@ #include /*! - \class vpMbKltTracker - \ingroup group_mbt_trackers - \warning This class is deprecated for user usage. You should rather use the high level - vpMbGenericTracker class. - \warning This class is only available if OpenCV is installed, and used. - - \brief Model based tracker using only KLT. - - The \ref tutorial-tracking-mb-deprecated is a good starting point to use this class. - - The tracker requires the knowledge of the 3D model that could be provided in - a vrml or in a cao file. The cao format is described in loadCAOModel(). It may - also use an xml file used to tune the behavior of the tracker and an init file - used to compute the pose at the very first image. - - The following code shows the simplest way to use the tracker. The \ref - tutorial-tracking-mb-deprecated is also a good starting point to use this class. 
- -\code -#include -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbKltTracker tracker; // Create a model based tracker via KLT points. - vpImage I; - vpHomogeneousMatrix cMo; // Pose computed using the tracker. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Klt Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required - // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the - // cube.init file. - tracker.initClick(I, "cube.init"); - - while(true){ - // Acquire a new image - vpDisplay::display(I); - tracker.track(I); // Track the object on this image - tracker.getPose(cMo); // Get the pose - - tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -#endif -} -\endcode - - The tracker can also be used without display, in that case the initial pose - must be known (object always at the same initial pose for example) or -computed using another method: - -\code -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbKltTracker tracker; // Create a model based tracker via Klt Points. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. - - //acquire an image - vpImageIo::read(I, "cube.pgm"); // Example of acquisition - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. 
- tracker.loadModel("cube.cao"); - tracker.initFromPose(I, cMo); // initialize the tracker with the given pose. - - while(true){ - // acquire a new image - tracker.track(I); // track the object on this image - tracker.getPose(cMo); // get the pose - } - - return 0; -#endif -} -\endcode - - Finally it can be used not to track an object but just to display a model at -a given pose: - -\code -#include -#include -#include -#include -#include -#include - -int main() -{ -#if defined VISP_HAVE_OPENCV - vpMbKltTracker tracker; // Create a model based tracker via Klt Points. - vpImage I; - vpHomogeneousMatrix cMo; // Pose used to display the model. - vpCameraParameters cam; - - // Acquire an image - vpImageIo::read(I, "cube.pgm"); - -#if defined(VISP_HAVE_X11) - vpDisplayX display; - display.init(I,100,100,"Mb Klt Tracker"); -#endif - - tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker - tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). - // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. - tracker.loadModel("cube.cao"); - - while(true){ - // acquire a new image - // Get the pose using any method - vpDisplay::display(I); - tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. - vpDisplay::flush(I); - } - - return 0; -#endif -} -\endcode -*/ + * \class vpMbKltTracker + * \ingroup group_mbt_trackers + * \warning This class is deprecated for user usage. You should rather use the high level + * vpMbGenericTracker class. + * \warning This class is only available if OpenCV is installed, and used. + * + * \brief Model based tracker using only KLT. + * + * The \ref tutorial-tracking-mb-deprecated is a good starting point to use this class. + * + * The tracker requires the knowledge of the 3D model that could be provided in + * a vrml or in a cao file. 
The cao format is described in loadCAOModel(). It may + * also use an xml file used to tune the behavior of the tracker and an init file + * used to compute the pose at the very first image. + * + * The following code shows the simplest way to use the tracker. The \ref + * tutorial-tracking-mb-deprecated is also a good starting point to use this class. + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbKltTracker tracker; // Create a model based tracker via KLT points. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose computed using the tracker. + * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Klt Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required + * // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the + * // cube.init file. + * tracker.initClick(I, "cube.init"); + * + * while(true){ + * // Acquire a new image + * vpDisplay::display(I); + * tracker.track(I); // Track the object on this image + * tracker.getPose(cMo); // Get the pose + * + * tracker.display(I, cMo, cam, vpColor::darkRed, 1); // Display the model at the computed pose. 
+ * vpDisplay::flush(I); + * } + * + * return 0; + * #endif + * } + * \endcode + * + * The tracker can also be used without display, in that case the initial pose + * must be known (object always at the same initial pose for example) or + * computed using another method: + * + * \code + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbKltTracker tracker; // Create a model based tracker via Klt Points. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. + * + * //acquire an image + * vpImageIo::read(I, "cube.pgm"); // Example of acquisition + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * tracker.initFromPose(I, cMo); // initialize the tracker with the given pose. + * + * while(true){ + * // acquire a new image + * tracker.track(I); // track the object on this image + * tracker.getPose(cMo); // get the pose + * } + * + * return 0; + * #endif + * } + * \endcode + * + * Finally it can be used not to track an object but just to display a model at + * a given pose: + * + * \code + * #include + * #include + * #include + * #include + * #include + * #include + * + * int main() + * { + * #if defined VISP_HAVE_OPENCV + * vpMbKltTracker tracker; // Create a model based tracker via Klt Points. + * vpImage I; + * vpHomogeneousMatrix cMo; // Pose used to display the model. 
+ * vpCameraParameters cam; + * + * // Acquire an image + * vpImageIo::read(I, "cube.pgm"); + * + * #if defined(VISP_HAVE_X11) + * vpDisplayX display; + * display.init(I,100,100,"Mb Klt Tracker"); + * #endif + * + * tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker + * tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). + * // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. + * tracker.loadModel("cube.cao"); + * + * while(true){ + * // acquire a new image + * // Get the pose using any method + * vpDisplay::display(I); + * tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. + * vpDisplay::flush(I); + * } + * + * return 0; + * #endif + * } + * \endcode + */ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker { protected: @@ -269,11 +268,11 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker /*! Return the address of the Klt feature list. */ virtual std::list &getFeaturesKlt() { return kltPolygons; } -/*! - Get the current list of KLT points. - - \return the list of KLT points through vpKltOpencv. - */ + /*! + * Get the current list of KLT points. + * + * \return the list of KLT points through vpKltOpencv. + */ inline std::vector getKltPoints() const { return tracker.getFeatures(); } std::vector getKltImagePoints() const; @@ -281,31 +280,31 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker std::map getKltImagePointsWithId() const; /*! - Get the klt tracker at the current state. - - \return klt tracker. + * Get the klt tracker at the current state. + * + * \return klt tracker. */ inline vpKltOpencv getKltOpencv() const { return tracker; } /*! - Get the erosion of the mask used on the Model faces. - - \return The erosion. + * Get the erosion of the mask used on the Model faces. + * + * \return The erosion. 
*/ inline unsigned int getKltMaskBorder() const { return maskBorder; } /*! - Get the current number of klt points. - - \return the number of features + * Get the current number of klt points. + * + * \return the number of features */ inline int getKltNbPoints() const { return tracker.getNbFeatures(); } /*! - Get the threshold for the acceptation of a point. - - \return threshold_outlier : Threshold for the weight below which a point - is rejected. + * Get the threshold for the acceptation of a point. + * + * \return threshold_outlier : Threshold for the weight below which a point + * is rejected. */ inline double getKltThresholdAcceptation() const { return threshold_outlier; } @@ -321,15 +320,15 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker virtual void loadConfigFile(const std::string &configFile, bool verbose = true) override; virtual void reInitModel(const vpImage &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, - bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()) override; + bool verbose = false, const vpHomogeneousMatrix &T = vpHomogeneousMatrix()); void resetTracker() override; void setCameraParameters(const vpCameraParameters &cam) override; /*! - Set the erosion of the mask used on the Model faces. - - \param e : The desired erosion. + * Set the erosion of the mask used on the Model faces. + * + * \param e : The desired erosion. */ inline void setKltMaskBorder(const unsigned int &e) { @@ -341,19 +340,19 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker virtual void setKltOpencv(const vpKltOpencv &t); /*! - Set the threshold for the acceptation of a point. - - \param th : Threshold for the weight below which a point is rejected. + * Set the threshold for the acceptation of a point. + * + * \param th : Threshold for the weight below which a point is rejected. */ inline void setKltThresholdAcceptation(double th) { threshold_outlier = th; } /*! 
- Use Ogre3D for visibility tests - - \warning This function has to be called before the initialization of the - tracker. - - \param v : True to use it, False otherwise + * Use Ogre3D for visibility tests + * + * \warning This function has to be called before the initialization of the + * tracker. + * + * \param v : True to use it, False otherwise */ virtual void setOgreVisibilityTest(const bool &v) override { @@ -364,9 +363,9 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker } /*! - Use Scanline algorithm for visibility tests - - \param v : True to use it, False otherwise + * Use Scanline algorithm for visibility tests + * + * \param v : True to use it, False otherwise */ virtual void setScanLineVisibilityTest(const bool &v) override { @@ -380,11 +379,11 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker virtual void setPose(const vpImage &I_color, const vpHomogeneousMatrix &cdMo) override; /*! - Set if the projection error criteria has to be computed. - - \param flag : True if the projection error criteria has to be computed, - false otherwise - */ + * Set if the projection error criteria has to be computed. + * + * \param flag : True if the projection error criteria has to be computed, + * false otherwise + */ virtual void setProjectionErrorComputation(const bool &flag) override { if (flag) @@ -405,32 +404,34 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker //@{ /*! - Get the erosion of the mask used on the Model faces. - \deprecated Use rather getkltMaskBorder() - - \return The erosion. + * Get the erosion of the mask used on the Model faces. + * \deprecated Use rather getkltMaskBorder() + * + * \return The erosion. */ /* vp_deprecated */ inline unsigned int getMaskBorder() const { return maskBorder; } - /*! - Get the current number of klt points. - \deprecated Use rather getKltNbPoints() - \return the number of features + /*! + * Get the current number of klt points. 
+ * \deprecated Use rather getKltNbPoints() + * + * \return the number of features */ /* vp_deprecated */ inline int getNbKltPoints() const { return tracker.getNbFeatures(); } /*! - Get the threshold for the acceptation of a point. - \deprecated Use rather getKltThresholdAcceptation() - - \return threshold_outlier : Threshold for the weight below which a point - is rejected. + * Get the threshold for the acceptation of a point. + * \deprecated Use rather getKltThresholdAcceptation() + * + * \return threshold_outlier : Threshold for the weight below which a point + * is rejected. */ /* vp_deprecated */ inline double getThresholdAcceptation() const { return threshold_outlier; } - /*! - Set the erosion of the mask used on the Model faces. - \param e : The desired erosion. + /*! + * Set the erosion of the mask used on the Model faces. + * + * \param e : The desired erosion. */ /* vp_deprecated */ inline void setMaskBorder(const unsigned int &e) { @@ -440,10 +441,10 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker } /*! - Set the threshold for the acceptation of a point. - \deprecated Use rather setKltThresholdAcceptation() - - \param th : Threshold for the weight below which a point is rejected. + * Set the threshold for the acceptation of a point. + * \deprecated Use rather setKltThresholdAcceptation() + * + * \param th : Threshold for the weight below which a point is rejected. 
*/ /* vp_deprecated */ inline void setThresholdAcceptation(double th) { threshold_outlier = th; } From 0e5611bb0e19d0190c33877045410f79bf8c3ee6 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Tue, 24 Oct 2023 22:23:32 +0200 Subject: [PATCH 04/14] Fix doxygen documentation warnings --- .../include/visp3/mbt/vpMbGenericTracker.h | 2 +- .../vision/include/visp3/vision/vpKeyPoint.h | 1042 ++++++++--------- modules/vision/src/key-point/vpKeyPoint.cpp | 6 +- .../visual_features/vpFeatureMomentCentered.h | 13 +- .../vpFeatureMomentCentered.cpp | 4 +- 5 files changed, 531 insertions(+), 536 deletions(-) diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index ca3b07483b..b576899404 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -783,7 +783,7 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker using vpMbDepthDenseTracker::setPose; #endif virtual void setPose(const vpImage *const I, const vpImage *const I_color, - const vpHomogeneousMatrix &cdMo); + const vpHomogeneousMatrix &cdMo) override; }; #ifdef VISP_HAVE_NLOHMANN_JSON friend void to_json(nlohmann::json &j, const TrackerWrapper &t); diff --git a/modules/vision/include/visp3/vision/vpKeyPoint.h b/modules/vision/include/visp3/vision/vpKeyPoint.h index 68c907dd6f..e92de87162 100644 --- a/modules/vision/include/visp3/vision/vpKeyPoint.h +++ b/modules/vision/include/visp3/vision/vpKeyPoint.h @@ -369,40 +369,40 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint unsigned int width); /*! - Build the reference keypoints list in a region of interest in the image. + * Build the reference keypoints list in a region of interest in the image. * - \param I : Input image. - \param rectangle : Rectangle of the region of interest. - \return The number of detected keypoints in the current image I. + * \param I : Input image. 
+ * \param rectangle : Rectangle of the region of interest. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I, const vpRect &rectangle); /*! - Build the reference keypoints list and compute the 3D position - corresponding of the keypoints locations. + * Build the reference keypoints list and compute the 3D position + * corresponding of the keypoints locations. * - \param I : Input image. - \param trainKeyPoints : List of the train keypoints. - \param points3f : Output list of the 3D position corresponding of the keypoints locations. - \param append : If true, append the supply train keypoints with those already present. - \param class_id : The class id to be set to the input cv::KeyPoint if != -1. - \return The number of detected keypoints in the current image I. + * \param I : Input image. + * \param trainKeyPoints : List of the train keypoints. + * \param points3f : Output list of the 3D position corresponding of the keypoints locations. + * \param append : If true, append the supply train keypoints with those already present. + * \param class_id : The class id to be set to the input cv::KeyPoint if != -1. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I, std::vector &trainKeyPoints, std::vector &points3f, bool append = false, int class_id = -1); /*! - Build the reference keypoints list and compute the 3D position - corresponding of the keypoints locations. + * Build the reference keypoints list and compute the 3D position + * corresponding of the keypoints locations. * - \param I : Input image. - \param trainKeyPoints : List of the train keypoints. - \param points3f : List of the 3D position corresponding of the keypoints locations. - \param trainDescriptors : List of the train descriptors. - \param append : If true, append the supply train keypoints with those already present. 
- \param class_id : The class id to be set to the input cv::KeyPoint if != -1. + * \param I : Input image. + * \param trainKeyPoints : List of the train keypoints. + * \param points3f : List of the 3D position corresponding of the keypoints locations. + * \param trainDescriptors : List of the train descriptors. + * \param append : If true, append the supply train keypoints with those already present. + * \param class_id : The class id to be set to the input cv::KeyPoint if != -1. * - \return The number of keypoints in the current image I. + * \return The number of keypoints in the current image I. */ unsigned int buildReference(const vpImage &I, const std::vector &trainKeyPoints, const cv::Mat &trainDescriptors, const std::vector &points3f, @@ -417,107 +417,107 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint unsigned int buildReference(const vpImage &I_color); /*! - Build the reference keypoints list in a region of interest in the image. + * Build the reference keypoints list in a region of interest in the image. * - \param I_color : Input reference image. - \param iP : Position of the top-left corner of the region of interest. - \param height : Height of the region of interest. - \param width : Width of the region of interest. - \return The number of detected keypoints in the current image I. + * \param I_color : Input reference image. + * \param iP : Position of the top-left corner of the region of interest. + * \param height : Height of the region of interest. + * \param width : Width of the region of interest. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I_color, const vpImagePoint &iP, unsigned int height, unsigned int width); /*! - Build the reference keypoints list in a region of interest in the image. + * Build the reference keypoints list in a region of interest in the image. * - \param I_color : Input image. - \param rectangle : Rectangle of the region of interest. 
- \return The number of detected keypoints in the current image I. + * \param I_color : Input image. + * \param rectangle : Rectangle of the region of interest. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I_color, const vpRect &rectangle); /*! - Build the reference keypoints list and compute the 3D position - corresponding of the keypoints locations. + * Build the reference keypoints list and compute the 3D position + * corresponding of the keypoints locations. * - \param I_color : Input image. - \param trainKeyPoints : List of the train keypoints. - \param points3f : Output list of the 3D position corresponding of the keypoints locations. - \param append : If true, append the supply train keypoints with those already present. - \param class_id : The class id to be set to the input cv::KeyPoint if != -1. - \return The number of detected keypoints in the current image I. + * \param I_color : Input image. + * \param trainKeyPoints : List of the train keypoints. + * \param points3f : Output list of the 3D position corresponding of the keypoints locations. + * \param append : If true, append the supply train keypoints with those already present. + * \param class_id : The class id to be set to the input cv::KeyPoint if != -1. + * \return The number of detected keypoints in the current image I. */ unsigned int buildReference(const vpImage &I_color, std::vector &trainKeyPoints, std::vector &points3f, bool append = false, int class_id = -1); /*! - Build the reference keypoints list and compute the 3D position - corresponding of the keypoints locations. + * Build the reference keypoints list and compute the 3D position + * corresponding of the keypoints locations. * - \param I_color : Input image. - \param trainKeyPoints : List of the train keypoints. - \param points3f : List of the 3D position corresponding of the keypoints locations. - \param trainDescriptors : List of the train descriptors. 
- \param append : If true, append the supply train keypoints with those already present. - \param class_id : The class id to be set to the input cv::KeyPoint if != -1. - \return The number of detected keypoints in the current image I. + * \param I_color : Input image. + * \param trainKeyPoints : List of the train keypoints. + * \param points3f : List of the 3D position corresponding of the keypoints locations. + * \param trainDescriptors : List of the train descriptors. + * \param append : If true, append the supply train keypoints with those already present. + * \param class_id : The class id to be set to the input cv::KeyPoint if != -1. + * \return The number of detected keypoints in the current image I. */ - unsigned int buildReference(const vpImage &I, const std::vector &trainKeyPoints, + unsigned int buildReference(const vpImage &I_color, const std::vector &trainKeyPoints, const cv::Mat &trainDescriptors, const std::vector &points3f, bool append = false, int class_id = -1); /*! - Compute the 3D coordinate in the world/object frame given the 2D image - coordinate and under the assumption that the point is located on a plane - whose the plane equation is known in the camera frame. - The Z-coordinate is retrieved according to the proportional relationship - between the plane equation expressed in the normalized camera frame - (derived from the image coordinate) and the same plane equation expressed - in the camera frame. + * Compute the 3D coordinate in the world/object frame given the 2D image + * coordinate and under the assumption that the point is located on a plane + * whose the plane equation is known in the camera frame. + * The Z-coordinate is retrieved according to the proportional relationship + * between the plane equation expressed in the normalized camera frame + * (derived from the image coordinate) and the same plane equation expressed + * in the camera frame. * - \param candidate : Keypoint we want to compute the 3D coordinate. 
- \param roi : List of 3D points in the camera frame representing a planar face. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the world and the camera frames. - \param point : 3D coordinate in the world/object frame computed. + * \param candidate : Keypoint we want to compute the 3D coordinate. + * \param roi : List of 3D points in the camera frame representing a planar face. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param point : 3D coordinate in the world/object frame computed. */ static void compute3D(const cv::KeyPoint &candidate, const std::vector &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point); /*! - Compute the 3D coordinate in the world/object frame given the 2D image - coordinate and under the assumption that the point is located on a plane - whose the plane equation is known in the camera frame. - The Z-coordinate is retrieved according to the proportional relationship - between the plane equation expressed in the normalized camera frame - (derived from the image coordinate) and the same plane equation expressed - in the camera frame. + * Compute the 3D coordinate in the world/object frame given the 2D image + * coordinate and under the assumption that the point is located on a plane + * whose the plane equation is known in the camera frame. + * The Z-coordinate is retrieved according to the proportional relationship + * between the plane equation expressed in the normalized camera frame + * (derived from the image coordinate) and the same plane equation expressed + * in the camera frame. * - \param candidate : vpImagePoint we want to compute the 3D coordinate. - \param roi : List of 3D points in the camera frame representing a planar face. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the world and the camera frames. 
- \param point : 3D coordinate in the world/object frame computed. + * \param candidate : vpImagePoint we want to compute the 3D coordinate. + * \param roi : List of 3D points in the camera frame representing a planar face. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param point : 3D coordinate in the world/object frame computed. */ static void compute3D(const vpImagePoint &candidate, const std::vector &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point); /*! - Keep only keypoints located on faces and compute for those keypoints the 3D - coordinate in the world/object frame given the 2D image coordinate and - under the assumption that the point is located on a plane. + * Keep only keypoints located on faces and compute for those keypoints the 3D + * coordinate in the world/object frame given the 2D image coordinate and + * under the assumption that the point is located on a plane. * - \param cMo : Homogeneous matrix between the world and the camera frames. - \param cam : Camera parameters. - \param candidates : In input, list of keypoints detected in the whole - image, in output, list of keypoints only located on planes. - \param polygons : List of 2D polygons representing the projection of the faces in - the image plane. - \param roisPt : List of faces, with the 3D coordinates known in the camera frame. - \param points : Output list of computed 3D coordinates (in - the world/object frame) of keypoints located only on faces. - \param descriptors : Optional parameter, pointer to the descriptors to filter. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param cam : Camera parameters. + * \param candidates : In input, list of keypoints detected in the whole + * image, in output, list of keypoints only located on planes. + * \param polygons : List of 2D polygons representing the projection of the faces in + * the image plane. 
+ * \param roisPt : List of faces, with the 3D coordinates known in the camera frame. + * \param points : Output list of computed 3D coordinates (in + * the world/object frame) of keypoints located only on faces. + * \param descriptors : Optional parameter, pointer to the descriptors to filter. */ static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector &candidates, @@ -526,20 +526,20 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &points, cv::Mat *descriptors = NULL); /*! - Keep only keypoints located on faces and compute for those keypoints the 3D - coordinate in the world/object frame given the 2D image coordinate and - under the assumption that the point is located on a plane. + * Keep only keypoints located on faces and compute for those keypoints the 3D + * coordinate in the world/object frame given the 2D image coordinate and + * under the assumption that the point is located on a plane. * - \param cMo : Homogeneous matrix between the world and the camera frames. - \param cam : Camera parameters. - \param candidates : In input, list of vpImagePoint located in the whole - image, in output, list of vpImagePoint only located on planes. - \param polygons : List of 2D polygons representing the projection of the faces in - the image plane. - \param roisPt : List of faces, with the 3D coordinates known in the camera frame. - \param points : Output list of computed 3D coordinates (in the world/object frame) - of vpImagePoint located only on faces. - \param descriptors : Optional parameter, pointer to the descriptors to filter. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param cam : Camera parameters. + * \param candidates : In input, list of vpImagePoint located in the whole + * image, in output, list of vpImagePoint only located on planes. + * \param polygons : List of 2D polygons representing the projection of the faces in + * the image plane. 
+ * \param roisPt : List of faces, with the 3D coordinates known in the camera frame. + * \param points : Output list of computed 3D coordinates (in the world/object frame) + * of vpImagePoint located only on faces. + * \param descriptors : Optional parameter, pointer to the descriptors to filter. */ static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector &candidates, @@ -548,19 +548,19 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &points, cv::Mat *descriptors = NULL); /*! - Keep only keypoints located on cylinders and compute the 3D coordinates in - the world/object frame given the 2D image coordinates. + * Keep only keypoints located on cylinders and compute the 3D coordinates in + * the world/object frame given the 2D image coordinates. * - \param cMo : Homogeneous matrix between the world and the camera frames. - \param cam : Camera parameters. - \param candidates : In input, list of keypoints detected in the whole - image, in output, list of keypoints only located on cylinders. - \param cylinders : List of vpCylinder corresponding of the cylinder objects in the - scene, projected in the camera frame. - \param vectorOfCylinderRois : For each cylinder, the corresponding list of bounding box. - \param points : Output list of computed 3D coordinates in the world/object frame for each - keypoint located on a cylinder. - \param descriptors : Optional parameter, pointer to the descriptors to filter. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param cam : Camera parameters. + * \param candidates : In input, list of keypoints detected in the whole + * image, in output, list of keypoints only located on cylinders. + * \param cylinders : List of vpCylinder corresponding of the cylinder objects in the + * scene, projected in the camera frame. + * \param vectorOfCylinderRois : For each cylinder, the corresponding list of bounding box. 
+ * \param points : Output list of computed 3D coordinates in the world/object frame for each + * keypoint located on a cylinder. + * \param descriptors : Optional parameter, pointer to the descriptors to filter. */ static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, @@ -569,19 +569,19 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &points, cv::Mat *descriptors = NULL); /*! - Keep only vpImagePoint located on cylinders and compute the 3D coordinates - in the world/object frame given the 2D image coordinates. + * Keep only vpImagePoint located on cylinders and compute the 3D coordinates + * in the world/object frame given the 2D image coordinates. * - \param cMo : Homogeneous matrix between the world and the camera frames. - \param cam : Camera parameters. - \param candidates : In input, list of vpImagePoint located in the image, in - output, list of vpImagePoint only located on cylinders. - \param cylinders : List of vpCylinder corresponding of the cylinder objects in the scene, - projected in the camera frame. - \param vectorOfCylinderRois : For each cylinder, the corresponding list of bounding box. - \param points : Output list of computed 3D coordinates in the world/object frame for each - vpImagePoint located on a cylinder. - \param descriptors : Optional parameter, pointer to the descriptors to filter. + * \param cMo : Homogeneous matrix between the world and the camera frames. + * \param cam : Camera parameters. + * \param candidates : In input, list of vpImagePoint located in the image, in + * output, list of vpImagePoint only located on cylinders. + * \param cylinders : List of vpCylinder corresponding of the cylinder objects in the scene, + * projected in the camera frame. + * \param vectorOfCylinderRois : For each cylinder, the corresponding list of bounding box. 
+ * \param points : Output list of computed 3D coordinates in the world/object frame for each + * vpImagePoint located on a cylinder. + * \param descriptors : Optional parameter, pointer to the descriptors to filter. */ static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, @@ -590,361 +590,361 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint std::vector &points, cv::Mat *descriptors = NULL); /*! - Compute the pose using the correspondence between 2D points and 3D points - using OpenCV function with RANSAC method. + * Compute the pose using the correspondence between 2D points and 3D points + * using OpenCV function with RANSAC method. * - \param imagePoints : List of 2D points corresponding to the location of the detected keypoints. - \param objectPoints : List of the 3D points in the object frame matched. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param inlierIndex : List of indexes of inliers. - \param elapsedTime : Elapsed time. - \param func : Function pointer to filter the final pose returned by OpenCV pose estimation method. - \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). - */ + * \param imagePoints : List of 2D points corresponding to the location of the detected keypoints. + * \param objectPoints : List of the 3D points in the object frame matched. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param inlierIndex : List of indexes of inliers. + * \param elapsedTime : Elapsed time. + * \param func : Function pointer to filter the final pose returned by OpenCV pose estimation method. + * \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). 
+ */ bool computePose(const std::vector &imagePoints, const std::vector &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector &inlierIndex, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL); /*! - Compute the pose using the correspondence between 2D points and 3D points - using ViSP function with RANSAC method. + * Compute the pose using the correspondence between 2D points and 3D points + * using ViSP function with RANSAC method. * - \param objectVpPoints : List of vpPoint with coordinates expressed in the object and in the camera frame. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param inliers : List of inliers. - \param elapsedTime : Elapsed time. - \param func : Function pointer to filter the pose in Ransac pose estimation, if we want - to eliminate the poses which do not respect some criterion - \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). + * \param objectVpPoints : List of vpPoint with coordinates expressed in the object and in the camera frame. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param inliers : List of inliers. + * \param elapsedTime : Elapsed time. + * \param func : Function pointer to filter the pose in Ransac pose estimation, if we want + * to eliminate the poses which do not respect some criterion + * \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). */ bool computePose(const std::vector &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector &inliers, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL); /*! - Compute the pose using the correspondence between 2D points and 3D points - using ViSP function with RANSAC method. + * Compute the pose using the correspondence between 2D points and 3D points + * using ViSP function with RANSAC method. 
* - \param objectVpPoints : List of vpPoint with coordinates expressed in the object and in the camera frame. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param inliers : List of inlier points. - \param inlierIndex : List of inlier index. - \param elapsedTime : Elapsed time. - \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). - \param func : Function pointer to filter the pose in Ransac pose estimation, if we want to eliminate the poses which - do not respect some criterion + * \param objectVpPoints : List of vpPoint with coordinates expressed in the object and in the camera frame. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param inliers : List of inlier points. + * \param inlierIndex : List of inlier index. + * \param elapsedTime : Elapsed time. + * \return True if the pose has been computed, false otherwise (not enough points, or size list mismatch). + * \param func : Function pointer to filter the pose in Ransac pose estimation, if we want to eliminate the poses which + * do not respect some criterion */ bool computePose(const std::vector &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector &inliers, std::vector &inlierIndex, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL); /*! - Initialize the size of the matching image (case with a matching side by - side between IRef and ICurrent). + * Initialize the size of the matching image (case with a matching side by + * side between IRef and ICurrent). * - \param IRef : Reference image. - \param ICurrent : Current image. - \param IMatching : Image matching. + * \param IRef : Reference image. + * \param ICurrent : Current image. + * \param IMatching : Image matching. */ void createImageMatching(vpImage &IRef, vpImage &ICurrent, vpImage &IMatching); /*! 
- Initialize the size of the matching image with appropriate size according - to the number of training images. Used to display the matching of keypoints - detected in the current image with those detected in multiple training - images. + * Initialize the size of the matching image with appropriate size according + * to the number of training images. Used to display the matching of keypoints + * detected in the current image with those detected in multiple training + * images. * - \param ICurrent : Current image. - \param IMatching : Image initialized with appropriate size. + * \param ICurrent : Current image. + * \param IMatching : Image initialized with appropriate size. */ void createImageMatching(vpImage &ICurrent, vpImage &IMatching); /*! - Initialize the size of the matching image (case with a matching side by - side between IRef and ICurrent). + * Initialize the size of the matching image (case with a matching side by + * side between IRef and ICurrent). * - \param IRef : Reference image. - \param ICurrent : Current image. - \param IMatching : Image matching. + * \param IRef : Reference image. + * \param ICurrent : Current image. + * \param IMatching : Image matching. */ void createImageMatching(vpImage &IRef, vpImage &ICurrent, vpImage &IMatching); /*! - Initialize the size of the matching image with appropriate size according - to the number of training images. Used to display the matching of keypoints - detected in the current image with those detected in multiple training - images. + * Initialize the size of the matching image with appropriate size according + * to the number of training images. Used to display the matching of keypoints + * detected in the current image with those detected in multiple training + * images. * - \param ICurrent : Current image. - \param IMatching : Image initialized with appropriate size. + * \param ICurrent : Current image. + * \param IMatching : Image initialized with appropriate size. 
*/ void createImageMatching(vpImage &ICurrent, vpImage &IMatching); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param I : Input image. - \param keyPoints : Output list of the detected keypoints. - \param rectangle : Optional rectangle of the region of interest. + * \param I : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param rectangle : Optional rectangle of the region of interest. */ void detect(const vpImage &I, std::vector &keyPoints, const vpRect &rectangle = vpRect()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param I_color : Input image. - \param keyPoints : Output list of the detected keypoints. - \param rectangle : Optional rectangle of the region of interest. + * \param I_color : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param rectangle : Optional rectangle of the region of interest. */ void detect(const vpImage &I_color, std::vector &keyPoints, const vpRect &rectangle = vpRect()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param matImg : Input image. - \param keyPoints : Output list of the detected keypoints. - \param mask : Optional 8-bit integer mask to detect only where mask[i][j] != 0. + * \param matImg : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param mask : Optional 8-bit integer mask to detect only where mask[i][j] != 0. */ void detect(const cv::Mat &matImg, std::vector &keyPoints, const cv::Mat &mask = cv::Mat()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param I : Input image. - \param keyPoints : Output list of the detected keypoints. - \param elapsedTime : Elapsed time. - \param rectangle : Optional rectangle of the region of interest. + * \param I : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param elapsedTime : Elapsed time. 
+ * \param rectangle : Optional rectangle of the region of interest. */ void detect(const vpImage &I, std::vector &keyPoints, double &elapsedTime, const vpRect &rectangle = vpRect()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param I_color : Input image. - \param keyPoints : Output list of the detected keypoints. - \param elapsedTime : Elapsed time. - \param rectangle : Optional rectangle of the region of interest. + * \param I_color : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param elapsedTime : Elapsed time. + * \param rectangle : Optional rectangle of the region of interest. */ void detect(const vpImage &I_color, std::vector &keyPoints, double &elapsedTime, const vpRect &rectangle = vpRect()); /*! - Detect keypoints in the image. + * Detect keypoints in the image. * - \param matImg : Input image. - \param keyPoints : Output list of the detected keypoints. - \param elapsedTime : Elapsed time. - \param mask : Optional 8-bit integer mask to detect only where mask[i][j] != 0. + * \param matImg : Input image. + * \param keyPoints : Output list of the detected keypoints. + * \param elapsedTime : Elapsed time. + * \param mask : Optional 8-bit integer mask to detect only where mask[i][j] != 0. */ void detect(const cv::Mat &matImg, std::vector &keyPoints, double &elapsedTime, - const cv::Mat &mask = cv::Mat()); - - /*! - Apply a set of affine transformations to the image, detect keypoints and - reproject them into initial image coordinates. - See http://www.ipol.im/pub/algo/my_affine_sift/ for the details. - See https://github.com/Itseez/opencv/blob/master/samples/python2/asift.py - for the Python implementation by Itseez and Matt Sheckells for the current - implementation in C++. - \param I : Input image. - \param listOfKeypoints : List of detected keypoints in the multiple images after - affine transformations. - \param listOfDescriptors : Corresponding list of descriptors. 
- \param listOfAffineI : Optional parameter, list of images after affine - transformations. - */ + const cv::Mat &mask = cv::Mat()); + + /*! + * Apply a set of affine transformations to the image, detect keypoints and + * reproject them into initial image coordinates. + * See http://www.ipol.im/pub/algo/my_affine_sift/ for the details. + * See https://github.com/Itseez/opencv/blob/master/samples/python2/asift.py + * for the Python implementation by Itseez and Matt Sheckells for the current + * implementation in C++. + * \param I : Input image. + * \param listOfKeypoints : List of detected keypoints in the multiple images after + * affine transformations. + * \param listOfDescriptors : Corresponding list of descriptors. + * \param listOfAffineI : Optional parameter, list of images after affine + * transformations. + */ void detectExtractAffine(const vpImage &I, std::vector > &listOfKeypoints, std::vector &listOfDescriptors, std::vector > *listOfAffineI = NULL); /*! - Display the reference and the detected keypoints in the images. + * Display the reference and the detected keypoints in the images. * - \param IRef : Input reference image. - \param ICurrent : Input current image. - \param size : Size of the displayed cross. + * \param IRef : Input reference image. + * \param ICurrent : Input current image. + * \param size : Size of the displayed cross. */ void display(const vpImage &IRef, const vpImage &ICurrent, unsigned int size = 3); /*! - Display the reference keypoints. + * Display the reference keypoints. * - \param ICurrent : Input current image. - \param size : Size of the displayed crosses. - \param color : Color of the crosses. + * \param ICurrent : Input current image. + * \param size : Size of the displayed crosses. + * \param color : Color of the crosses. */ void display(const vpImage &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green); /*! - Display the reference and the detected keypoints in the images. 
+ * Display the reference and the detected keypoints in the images. * - \param IRef : Input reference image. - \param ICurrent : Input current image. - \param size : Size of the displayed cross. + * \param IRef : Input reference image. + * \param ICurrent : Input current image. + * \param size : Size of the displayed cross. */ void display(const vpImage &IRef, const vpImage &ICurrent, unsigned int size = 3); /*! - Display the reference keypoints. + * Display the reference keypoints. * - \param ICurrent : Input current image. - \param size : Size of the displayed crosses. - \param color : Color of the crosses. + * \param ICurrent : Input current image. + * \param size : Size of the displayed crosses. + * \param color : Color of the crosses. */ void display(const vpImage &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green); /*! - Display the matching lines between the detected keypoints with those - detected in one training image. + * Display the matching lines between the detected keypoints with those + * detected in one training image. * - \param IRef : Reference image, used to have the x-offset. - \param IMatching : Resulting image matching. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed lines. - \param color : Color to use, if none, we pick randomly a color for each pair - of matching. + * \param IRef : Reference image, used to have the x-offset. + * \param IMatching : Resulting image matching. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed lines. + * \param color : Color to use, if none, we pick randomly a color for each pair + * of matching. */ void displayMatching(const vpImage &IRef, vpImage &IMatching, unsigned int crossSize, unsigned int lineThickness = 1, const vpColor &color = vpColor::green); /*! 
- Display matching between keypoints detected in the current image and with - those detected in the multiple training images. Display also RANSAC inliers - if the list is supplied. + * Display matching between keypoints detected in the current image and with + * those detected in the multiple training images. Display also RANSAC inliers + * if the list is supplied. * - \param ICurrent : Current image. - \param IMatching : Resulting matching image. - \param ransacInliers : List of Ransac inliers or empty list if not available. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed line. + * \param ICurrent : Current image. + * \param IMatching : Resulting matching image. + * \param ransacInliers : List of Ransac inliers or empty list if not available. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed line. */ void displayMatching(const vpImage &ICurrent, vpImage &IMatching, const std::vector &ransacInliers = std::vector(), unsigned int crossSize = 3, unsigned int lineThickness = 1); /*! - Display the matching lines between the detected keypoints with those - detected in one training image. + * Display the matching lines between the detected keypoints with those + * detected in one training image. * - \param IRef : Reference image, used to have the x-offset. - \param IMatching : Resulting image matching. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed lines. - \param color : Color to use, if none, we pick randomly a color for each pair - of matching. + * \param IRef : Reference image, used to have the x-offset. + * \param IMatching : Resulting image matching. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed lines. + * \param color : Color to use, if none, we pick randomly a color for each pair + * of matching. 
*/ void displayMatching(const vpImage &IRef, vpImage &IMatching, unsigned int crossSize, unsigned int lineThickness = 1, const vpColor &color = vpColor::green); /*! - Display the matching lines between the detected keypoints with those - detected in one training image. + * Display the matching lines between the detected keypoints with those + * detected in one training image. * - \param IRef : Reference image, used to have the x-offset. - \param IMatching : Resulting image matching. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed lines. - \param color : Color to use, if none, we pick randomly a color for each pair - of matching. + * \param IRef : Reference image, used to have the x-offset. + * \param IMatching : Resulting image matching. + * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed lines. + * \param color : Color to use, if none, we pick randomly a color for each pair + * of matching. */ void displayMatching(const vpImage &IRef, vpImage &IMatching, unsigned int crossSize, unsigned int lineThickness = 1, const vpColor &color = vpColor::green); /*! - Display matching between keypoints detected in the current image and with - those detected in the multiple training images. Display also RANSAC inliers - if the list is supplied. + * Display matching between keypoints detected in the current image and with + * those detected in the multiple training images. Display also RANSAC inliers + * if the list is supplied. * - \param ICurrent : Current image. - \param IMatching : Resulting matching image. - \param ransacInliers : List of Ransac inliers or empty list if not available. - \param crossSize : Size of the displayed crosses. - \param lineThickness : Thickness of the displayed line. + * \param ICurrent : Current image. + * \param IMatching : Resulting matching image. + * \param ransacInliers : List of Ransac inliers or empty list if not available. 
+ * \param crossSize : Size of the displayed crosses. + * \param lineThickness : Thickness of the displayed line. */ void displayMatching(const vpImage &ICurrent, vpImage &IMatching, const std::vector &ransacInliers = std::vector(), unsigned int crossSize = 3, unsigned int lineThickness = 1); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param I : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param I : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I, std::vector &keyPoints, cv::Mat &descriptors, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param I_color : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param I_color : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. 
+ * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I_color, std::vector &keyPoints, cv::Mat &descriptors, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param matImg : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint cannot - be extracted, we need to remove the corresponding 3D point. + * \param matImg : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint cannot + * be extracted, we need to remove the corresponding 3D point. */ void extract(const cv::Mat &matImg, std::vector &keyPoints, cv::Mat &descriptors, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param I : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param elapsedTime : Elapsed time. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param I : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. 
+ * \param elapsedTime : Elapsed time. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I, std::vector &keyPoints, cv::Mat &descriptors, double &elapsedTime, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param I_color : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param elapsedTime : Elapsed time. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param I_color : Input image. + * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param elapsedTime : Elapsed time. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const vpImage &I_color, std::vector &keyPoints, cv::Mat &descriptors, double &elapsedTime, std::vector *trainPoints = NULL); /*! - Extract the descriptors for each keypoints of the list. + * Extract the descriptors for each keypoints of the list. * - \param matImg : Input image. - \param keyPoints : List of keypoints we want to extract their descriptors. - \param descriptors : Descriptors matrix with at each row the descriptors - values for each keypoint. - \param elapsedTime : Elapsed time. - \param trainPoints : Pointer to the list of 3D train points, when a keypoint - cannot be extracted, we need to remove the corresponding 3D point. + * \param matImg : Input image. 
+ * \param keyPoints : List of keypoints we want to extract their descriptors. + * \param descriptors : Descriptors matrix with at each row the descriptors + * values for each keypoint. + * \param elapsedTime : Elapsed time. + * \param trainPoints : Pointer to the list of 3D train points, when a keypoint + * cannot be extracted, we need to remove the corresponding 3D point. */ void extract(const cv::Mat &matImg, std::vector &keyPoints, cv::Mat &descriptors, double &elapsedTime, std::vector *trainPoints = NULL); @@ -1148,20 +1148,20 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint inline unsigned int getNbImages() const { return static_cast(m_mapOfImages.size()); } /*! - Get the 3D coordinates of the object points matched (the corresponding 3D - coordinates in the object frame of the keypoints detected in the current - image after the matching). + * Get the 3D coordinates of the object points matched (the corresponding 3D + * coordinates in the object frame of the keypoints detected in the current + * image after the matching). * - \param objectPoints : List of 3D coordinates in the object frame. + * \param objectPoints : List of 3D coordinates in the object frame. */ void getObjectPoints(std::vector &objectPoints) const; /*! - Get the 3D coordinates of the object points matched (the corresponding 3D - coordinates in the object frame of the keypoints detected in the current - image after the matching). + * Get the 3D coordinates of the object points matched (the corresponding 3D + * coordinates in the object frame of the keypoints detected in the current + * image after the matching). * - \param objectPoints : List of 3D coordinates in the object frame. + * \param objectPoints : List of 3D coordinates in the object frame. */ void getObjectPoints(std::vector &objectPoints) const; @@ -1181,22 +1181,22 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; } /*! 
- Get the query keypoints list in OpenCV type. + * Get the query keypoints list in OpenCV type. * - \param matches : If false return the list of all query keypoints extracted in the current image. - If true, return only the query keypoints list that have matches. - \param keyPoints : List of query keypoints (or keypoints detected in the - current image). + * \param matches : If false return the list of all query keypoints extracted in the current image. + * If true, return only the query keypoints list that have matches. + * \param keyPoints : List of query keypoints (or keypoints detected in the + * current image). */ void getQueryKeyPoints(std::vector &keyPoints, bool matches = true) const; /*! - Get the query keypoints list in ViSP type. + * Get the query keypoints list in ViSP type. * - \param keyPoints : List of query keypoints (or keypoints detected in the - current image). - \param matches : If false return the list of all query keypoints extracted in the current image. - If true, return only the query keypoints list that have matches. + * \param keyPoints : List of query keypoints (or keypoints detected in the + * current image). + * \param matches : If false return the list of all query keypoints extracted in the current image. + * If true, return only the query keypoints list that have matches. */ void getQueryKeyPoints(std::vector &keyPoints, bool matches = true) const; @@ -1223,204 +1223,204 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; } /*! - Get the train keypoints list in OpenCV type. + * Get the train keypoints list in OpenCV type. * - \param keyPoints : List of train keypoints (or reference keypoints). + * \param keyPoints : List of train keypoints (or reference keypoints). */ void getTrainKeyPoints(std::vector &keyPoints) const; /*! - Get the train keypoints list in ViSP type. + * Get the train keypoints list in ViSP type. 
* - \param keyPoints : List of train keypoints (or reference keypoints). + * \param keyPoints : List of train keypoints (or reference keypoints). */ void getTrainKeyPoints(std::vector &keyPoints) const; /*! - Get the train points (the 3D coordinates in the object frame) list in - OpenCV type. + * Get the train points (the 3D coordinates in the object frame) list in + * OpenCV type. * - \param points : List of train points (or reference points). + * \param points : List of train points (or reference points). */ void getTrainPoints(std::vector &points) const; /*! - Get the train points (the 3D coordinates in the object frame) list in ViSP - type. + * Get the train points (the 3D coordinates in the object frame) list in ViSP + * type. * - \param points : List of train points (or reference points). + * \param points : List of train points (or reference points). */ void getTrainPoints(std::vector &points) const; /*! - Initialize a matcher based on its name. + * Initialize a matcher based on its name. * - \param matcherName : Name of the matcher (e.g BruteForce, FlannBased). + * \param matcherName : Name of the matcher (e.g BruteForce, FlannBased). */ void initMatcher(const std::string &matcherName); /*! - Insert a reference image and a current image side-by-side. + * Insert a reference image and a current image side-by-side. * - \param IRef : Reference image. - \param ICurrent : Current image. - \param IMatching : Matching image for displaying all the matching between - the query keypoints and those detected in the training images. + * \param IRef : Reference image. + * \param ICurrent : Current image. + * \param IMatching : Matching image for displaying all the matching between + * the query keypoints and those detected in the training images. */ void insertImageMatching(const vpImage &IRef, const vpImage &ICurrent, vpImage &IMatching); /*! - Insert the different training images in the matching image. + * Insert the different training images in the matching image. 
* - \param ICurrent : Current image. - \param IMatching : Matching image for displaying all the matching between - the query keypoints and those detected in the training images + * \param ICurrent : Current image. + * \param IMatching : Matching image for displaying all the matching between + * the query keypoints and those detected in the training images */ void insertImageMatching(const vpImage &ICurrent, vpImage &IMatching); /*! - Insert a reference image and a current image side-by-side. + * Insert a reference image and a current image side-by-side. * - \param IRef : Reference image. - \param ICurrent : Current image. - \param IMatching : Matching image for displaying all the matching between - the query keypoints and those detected in the training images. + * \param IRef : Reference image. + * \param ICurrent : Current image. + * \param IMatching : Matching image for displaying all the matching between + * the query keypoints and those detected in the training images. */ void insertImageMatching(const vpImage &IRef, const vpImage &ICurrent, vpImage &IMatching); /*! - Insert the different training images in the matching image. + * Insert the different training images in the matching image. * - \param ICurrent : Current image. - \param IMatching : Matching image for displaying all the matching between - the query keypoints and those detected in the training images + * \param ICurrent : Current image. + * \param IMatching : Matching image for displaying all the matching between + * the query keypoints and those detected in the training images */ void insertImageMatching(const vpImage &ICurrent, vpImage &IMatching); /*! - Load configuration parameters from an XML config file. + * Load configuration parameters from an XML config file. * - \param configFile : Path to the XML config file. + * \param configFile : Path to the XML config file. */ void loadConfigFile(const std::string &configFile); /*! - Load learning data saved on disk. 
+ * Load learning data saved on disk. * - \param filename : Path of the learning file. - \param binaryMode : If true, the learning file is in a binary mode, - otherwise it is in XML mode. - \param append : If true, concatenate the learning data, otherwise reset the variables. + * \param filename : Path of the learning file. + * \param binaryMode : If true, the learning file is in a binary mode, + * otherwise it is in XML mode. + * \param append : If true, concatenate the learning data, otherwise reset the variables. */ void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false); /*! - Match keypoints based on distance between their descriptors. + * Match keypoints based on distance between their descriptors. * - \param trainDescriptors : Train descriptors (or reference descriptors). - \param queryDescriptors : Query descriptors. - \param matches : Output list of matches. - \param elapsedTime : Elapsed time. + * \param trainDescriptors : Train descriptors (or reference descriptors). + * \param queryDescriptors : Query descriptors. + * \param matches : Output list of matches. + * \param elapsedTime : Elapsed time. */ void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector &matches, double &elapsedTime); /*! - Match keypoints detected in the image with those built in the reference - list. + * Match keypoints detected in the image with those built in the reference + * list. * - \param I : Input current image. - \return The number of matched keypoints. + * \param I : Input current image. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I); /*! - Match keypoints detected in a region of interest of the image with those - built in the reference list. + * Match keypoints detected in a region of interest of the image with those + * built in the reference list. * - \param I : Input image. - \param iP : Coordinate of the top-left corner of the region of interest. 
- \param height : Height of the region of interest. - \param width : Width of the region of interest. - \return The number of matched keypoints. + * \param I : Input image. + * \param iP : Coordinate of the top-left corner of the region of interest. + * \param height : Height of the region of interest. + * \param width : Width of the region of interest. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I, const vpImagePoint &iP, unsigned int height, unsigned int width); /*! - Match keypoints detected in a region of interest of the image with those - built in the reference list. + * Match keypoints detected in a region of interest of the image with those + * built in the reference list. * - \param I : Input image. - \param rectangle : Rectangle of the region of interest. - \return The number of matched keypoints. + * \param I : Input image. + * \param rectangle : Rectangle of the region of interest. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I, const vpRect &rectangle); /*! - Match query keypoints with those built in the reference list using buildReference(). + * Match query keypoints with those built in the reference list using buildReference(). * - \param queryKeyPoints : List of the query keypoints. - \param queryDescriptors : List of the query descriptors. - - \return The number of matched keypoints. + * \param queryKeyPoints : List of the query keypoints. + * \param queryDescriptors : List of the query descriptors. + * + * \return The number of matched keypoints. */ unsigned int matchPoint(const std::vector &queryKeyPoints, const cv::Mat &queryDescriptors); /*! - Match keypoints detected in the image with those built in the reference - list and compute the pose. + * Match keypoints detected in the image with those built in the reference + * list and compute the pose. * - \param I : Input image. - \param cam : Camera parameters. 
- \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param func : Function pointer to filter the pose in Ransac pose - estimation, if we want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. + * \param I : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param func : Function pointer to filter the pose in Ransac pose + * estimation, if we want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list and compute the pose. + * Match keypoints detected in the image with those built in the reference + * list and compute the pose. * - \param I : Input image. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param error : Reprojection mean square error (in pixel) between the - 2D points and the projection of the 3D points with the estimated pose. - \param elapsedTime : Time to detect, extract, match and compute the pose. - \param func : Function pointer to filter the pose in Ransac pose - estimation, if we want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. + * \param I : Input image. 
+ * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param error : Reprojection mean square error (in pixel) between the + * 2D points and the projection of the 3D points with the estimated pose. + * \param elapsedTime : Time to detect, extract, match and compute the pose. + * \param func : Function pointer to filter the pose in Ransac pose + * estimation, if we want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list and return the bounding box and the center of gravity. - * - \param I : Input image. - \param boundingBox : Bounding box that contains the good matches. - \param centerOfGravity : Center of gravity computed from the location of - the good matches (could differ of the center of the bounding box). - \param isPlanarObject : If the object is planar, the homography matrix is - estimated to eliminate outliers, otherwise it is the fundamental matrix - which is estimated. - \param imPts1 : Pointer to the list of reference keypoints if not null. - \param imPts2 : Pointer to the list of current keypoints if not null. - \param meanDescriptorDistance : Pointer to the value - of the average distance of the descriptors if not null. - \param detection_score : Pointer to the value of the detection score if not null. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) - to consider. - \return True if the object is present, false otherwise. 
+ * Match keypoints detected in the image with those built in the reference + * list and return the bounding box and the center of gravity. + * + * \param I : Input image. + * \param boundingBox : Bounding box that contains the good matches. + * \param centerOfGravity : Center of gravity computed from the location of + * the good matches (could differ of the center of the bounding box). + * \param isPlanarObject : If the object is planar, the homography matrix is + * estimated to eliminate outliers, otherwise it is the fundamental matrix + * which is estimated. + * \param imPts1 : Pointer to the list of reference keypoints if not null. + * \param imPts2 : Pointer to the list of current keypoints if not null. + * \param meanDescriptorDistance : Pointer to the value + * of the average distance of the descriptors if not null. + * \param detectionScore : Pointer to the value of the detection score if not null. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) + * to consider. + * \return True if the object is present, false otherwise. */ bool matchPointAndDetect(const vpImage &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject = true, std::vector *imPts1 = NULL, @@ -1428,89 +1428,89 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint double *detectionScore = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list, compute the pose and return also the bounding box and the center of - gravity. - * - \param I : Input image. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param error : Reprojection mean square error (in pixel) between the - 2D points and the projection of the 3D points with the estimated pose. - \param elapsedTime : Time to detect, extract, match and compute the pose. - \param boundingBox : Bounding box that contains the good matches. 
- \param centerOfGravity : Center of gravity computed from the location of - the good matches (could differ of the center of the bounding box). - \param func : Function pointer to filter the pose in Ransac pose estimation, if we - want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. - */ + * Match keypoints detected in the image with those built in the reference + * list, compute the pose and return also the bounding box and the center of + * gravity. + * + * \param I : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param error : Reprojection mean square error (in pixel) between the + * 2D points and the projection of the 3D points with the estimated pose. + * \param elapsedTime : Time to detect, extract, match and compute the pose. + * \param boundingBox : Bounding box that contains the good matches. + * \param centerOfGravity : Center of gravity computed from the location of + * the good matches (could differ of the center of the bounding box). + * \param func : Function pointer to filter the pose in Ransac pose estimation, if we + * want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. + */ bool matchPointAndDetect(const vpImage &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint ¢erOfGravity, bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list. 
+ * Match keypoints detected in the image with those built in the reference + * list. * - \param I_color : Input current image. - \return The number of matched keypoints. + * \param I_color : Input current image. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I_color); /*! - Match keypoints detected in a region of interest of the image with those - built in the reference list. + * Match keypoints detected in a region of interest of the image with those + * built in the reference list. * - \param I_color : Input image. - \param iP : Coordinate of the top-left corner of the region of interest. - \param height : Height of the region of interest. - \param width : Width of the region of interest. - \return The number of matched keypoints. + * \param I_color : Input image. + * \param iP : Coordinate of the top-left corner of the region of interest. + * \param height : Height of the region of interest. + * \param width : Width of the region of interest. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I_color, const vpImagePoint &iP, unsigned int height, unsigned int width); /*! - Match keypoints detected in a region of interest of the image with those - built in the reference list. + * Match keypoints detected in a region of interest of the image with those + * built in the reference list. * - \param I_color : Input image. - \param rectangle : Rectangle of the region of interest. - \return The number of matched keypoints. + * \param I_color : Input image. + * \param rectangle : Rectangle of the region of interest. + * \return The number of matched keypoints. */ unsigned int matchPoint(const vpImage &I_color, const vpRect &rectangle); /*! - Match keypoints detected in the image with those built in the reference - list and compute the pose. + * Match keypoints detected in the image with those built in the reference + * list and compute the pose. * - \param I_color : Input image. 
- \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param func : Function pointer to filter the pose in Ransac pose - estimation, if we want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. - \return True if the matching and the pose estimation are OK, false otherwise. + * \param I_color : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param func : Function pointer to filter the pose in Ransac pose + * estimation, if we want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect()); /*! - Match keypoints detected in the image with those built in the reference - list and compute the pose. + * Match keypoints detected in the image with those built in the reference + * list and compute the pose. * - \param I_color : Input image. - \param cam : Camera parameters. - \param cMo : Homogeneous matrix between the object frame and the camera frame. - \param error : Reprojection mean square error (in pixel) between the - 2D points and the projection of the 3D points with the estimated pose. - \param elapsedTime : Time to detect, extract, match and compute the pose. - \param func : Function pointer to filter the pose in Ransac pose - estimation, if we want to eliminate the poses which do not respect some criterion. - \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. 
- \return True if the matching and the pose estimation are OK, false otherwise. + * \param I_color : Input image. + * \param cam : Camera parameters. + * \param cMo : Homogeneous matrix between the object frame and the camera frame. + * \param error : Reprojection mean square error (in pixel) between the + * 2D points and the projection of the 3D points with the estimated pose. + * \param elapsedTime : Time to detect, extract, match and compute the pose. + * \param func : Function pointer to filter the pose in Ransac pose + * estimation, if we want to eliminate the poses which do not respect some criterion. + * \param rectangle : Rectangle corresponding to the ROI (Region of Interest) to consider. + * \return True if the matching and the pose estimation are OK, false otherwise. */ bool matchPoint(const vpImage &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL, @@ -1522,12 +1522,12 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint void reset(); /*! - Save the learning data in a file in XML or binary mode. + * Save the learning data in a file in XML or binary mode. * - \param filename : Path of the save file. - \param binaryMode : If true, the data are saved in binary mode, otherwise - in XML mode. - \param saveTrainingImages : If true, save also the training images on disk. + * \param filename : Path of the save file. + * \param binaryMode : If true, the data are saved in binary mode, otherwise + * in XML mode. + * \param saveTrainingImages : If true, save also the training images on disk. */ void saveLearningData(const std::string &filename, bool binaryMode = false, bool saveTrainingImages = true); @@ -2107,18 +2107,18 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai); /*! 
- Compute the pose estimation error, the mean square error (in pixel) between - the location of the detected keypoints and the location of the projection - of the 3D model with the estimated pose. + * Compute the pose estimation error, the mean square error (in pixel) between + * the location of the detected keypoints and the location of the projection + * of the 3D model with the estimated pose. * - \param matchKeyPoints : List of pairs between the detected keypoints and - the corresponding 3D points. - \param cam : Camera parameters. - \param cMo_est : Estimated pose of the object. + * \param matchKeyPoints : List of pairs between the detected keypoints and + * the corresponding 3D points. + * \param cam : Camera parameters. + * \param cMo_est : Estimated pose of the object. * - \return The mean square error (in pixel) between the location of the - detected keypoints and the location of the projection of the 3D model with - the estimated pose. + * \return The mean square error (in pixel) between the location of the + * detected keypoints and the location of the projection of the 3D model with + * the estimated pose. */ double computePoseEstimationError(const std::vector > &matchKeyPoints, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est); @@ -2129,38 +2129,38 @@ class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint void filterMatches(); /*! - Initialize method for RANSAC parameters and for detectors, extractors and - matcher, and for others parameters. + * Initialize method for RANSAC parameters and for detectors, extractors and + * matcher, and for other parameters. */ void init(); /*! - Initialize a keypoint detector based on its name. + * Initialize a keypoint detector based on its name. * - \param detectorName : Name of the detector (e.g FAST, SIFT, SURF, etc.). + * \param detectorNames : Name of the detector (e.g. FAST, SIFT, SURF, etc.). */ void initDetector(const std::string &detectorNames); /*!
- Initialize a list of keypoints detectors if we want to concatenate multiple - detectors. + * Initialize a list of keypoints detectors if we want to concatenate multiple + * detectors. * - \param detectorNames : List of detector names. + * \param detectorNames : List of detector names. */ void initDetectors(const std::vector &detectorNames); /*! - Initialize a descriptor extractor based on its name. + * Initialize a descriptor extractor based on its name. * - \param extractorName : Name of the extractor (e.g SIFT, SURF, ORB, etc.). + * \param extractorName : Name of the extractor (e.g SIFT, SURF, ORB, etc.). */ void initExtractor(const std::string &extractorName); /*! - Initialize a list of descriptor extractors if we want to concatenate - multiple extractors. + * Initialize a list of descriptor extractors if we want to concatenate + * multiple extractors. * - \param extractorNames : List of extractor names. + * \param extractorNames : List of extractor names. */ void initExtractors(const std::vector &extractorNames); diff --git a/modules/vision/src/key-point/vpKeyPoint.cpp b/modules/vision/src/key-point/vpKeyPoint.cpp index c459d95982..ce49956d51 100644 --- a/modules/vision/src/key-point/vpKeyPoint.cpp +++ b/modules/vision/src/key-point/vpKeyPoint.cpp @@ -3266,7 +3266,7 @@ bool vpKeyPoint::matchPoint(const vpImage &I_color, const vpCameraParame bool vpKeyPoint::matchPointAndDetect(const vpImage &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject, std::vector *imPts1, std::vector *imPts2, - double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle) + double *meanDescriptorDistance, double *detectionScore, const vpRect &rectangle) { if (imPts1 != NULL && imPts2 != NULL) { imPts1->clear(); @@ -3286,8 +3286,8 @@ bool vpKeyPoint::matchPointAndDetect(const vpImage &I, vpRect &bo if (meanDescriptorDistance != NULL) { *meanDescriptorDistance = meanDescriptorDistanceTmp; } - if (detection_score != NULL) { - *detection_score 
= score; + if (detectionScore != NULL) { + *detectionScore = score; } if (m_filteredMatches.size() >= 4) { diff --git a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h index 2733ea9d0b..d48712d45d 100644 --- a/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h +++ b/modules/visual_features/include/visp3/visual_features/vpFeatureMomentCentered.h @@ -58,8 +58,9 @@ class vpMomentDatabase; * (i,j). * * vpFeatureMomentCentered computes interaction matrices all interaction - * matrices up to vpMomentObject::getOrder()-1. \attention The maximum order - * reached by vpFeatureMomentBasic is NOT the maximum order of the + * matrices up to vpMomentObject::getOrder()-1. + * + * \attention The maximum order reached by vpFeatureMomentBasic is NOT the maximum order of the * vpMomentObject, it is one unit smaller. For example if you define your * vpMomentObject up to order n then vpFeatureMomentBasic will be able to * compute interaction matrices up to order n-1 that is \f$ L_{m_{ij}} \f$ with @@ -72,7 +73,6 @@ class vpMomentDatabase; */ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment { - protected: unsigned int order; /*! @@ -94,12 +94,7 @@ class VISP_EXPORT vpFeatureMomentCentered : public vpFeatureMoment throw vpException(vpException::functionNotImplementedError, "Not implemented!"); } #endif - /*! - * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment - * \param select_one : first index (i) - * \param select_two : second index (j) - * \return Interaction matrix corresponding to the moment - */ + vpMatrix interaction(unsigned int select_one, unsigned int select_two) const; /*! 
diff --git a/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp b/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp index 7373e2f280..a6642f887f 100644 --- a/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp +++ b/modules/visual_features/src/visual-feature/vpFeatureMomentCentered.cpp @@ -45,7 +45,7 @@ #include /*! - * Default constructor + * Default constructor. * \param moments_ : Database of moment primitives. * \param A_ : First plane coefficient for a plane equation of the following type Ax+By+C=1/Z. * \param B_ : Second plane coefficient for a plane equation of the following type Ax+By+C=1/Z. @@ -58,7 +58,7 @@ vpFeatureMomentCentered::vpFeatureMomentCentered(vpMomentDatabase &moments_, dou { } /*! - * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment + * Interaction matrix corresponding to \f$ \mu_{ij} \f$ moment. * \param select_one : first index (i). * \param select_two : second index (j). * \return Interaction matrix corresponding to the moment. From 6f09e697002e054a4dc7fb03ac0671cf61723961 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Wed, 25 Oct 2023 10:47:56 +0200 Subject: [PATCH 05/14] Fix override --- .../gui/include/visp3/gui/vpDisplayWin32.h | 15 +- .../mbt/include/visp3/mbt/vpMbHiddenFaces.h | 451 +++++++++--------- 2 files changed, 222 insertions(+), 244 deletions(-) diff --git a/modules/gui/include/visp3/gui/vpDisplayWin32.h b/modules/gui/include/visp3/gui/vpDisplayWin32.h index 3d3c31f0fc..47a8d8611a 100644 --- a/modules/gui/include/visp3/gui/vpDisplayWin32.h +++ b/modules/gui/include/visp3/gui/vpDisplayWin32.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. 
* @@ -30,11 +29,7 @@ * * Description: * Windows 32 display base class - * - * Authors: - * Bruno Renier - * -*****************************************************************************/ + */ #include @@ -165,10 +160,10 @@ class VISP_EXPORT vpDisplayWin32 : public vpDisplay void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill = false, unsigned int thickness = 1) override; - void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1); override + void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness = 1) override; - void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, - unsigned int thickness = 1) override; + void displayDotLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, + unsigned int thickness = 1) override; void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness = 1) override; diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h index b5a5cea431..8edb9db333 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbHiddenFaces.h @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -30,13 +29,8 @@ * * Description: * Generic model based tracker. This class declares the methods to implement - *in order to have a model based tracker. - * - * Authors: - * Romain Tallonneau - * Aurelien Yol - * -*****************************************************************************/ + * in order to have a model based tracker. 
+ */ #pragma once #ifndef vpMbHiddenFaces_HH @@ -60,12 +54,12 @@ template class vpMbHiddenFaces; template void swap(vpMbHiddenFaces &first, vpMbHiddenFaces &second); /*! - \class vpMbHiddenFaces - - \brief Implementation of the polygons management for the model-based - trackers. - - \ingroup group_mbt_faces + * \class vpMbHiddenFaces + * + * \brief Implementation of the polygons management for the model-based + * trackers. + * + * \ingroup group_mbt_faces */ template class vpMbHiddenFaces { @@ -118,10 +112,10 @@ template class vpMbHiddenFaces #endif /*! - Get the list of polygons. - - \return Mbt Klt polygons list. - */ + * Get the list of polygons. + * + * \return Mbt Klt polygons list. + */ std::vector &getPolygon() { return Lpol; } #ifdef VISP_HAVE_OGRE @@ -129,40 +123,40 @@ template class vpMbHiddenFaces #endif /*! - get the number of visible polygons. - - \return number of visible polygons. - */ + * Get the number of visible polygons. + * + * \return number of visible polygons. + */ unsigned int getNbVisiblePolygon() const { return nbVisiblePolygon; } #ifdef VISP_HAVE_OGRE /*! - Get the number of rays that will be sent toward each polygon for - visibility test. Each ray will go from the optic center of the camera to a - random point inside the considered polygon. - - \sa getGoodNbRayCastingAttemptsRatio() - - \return Number of rays sent. - */ + * Get the number of rays that will be sent toward each polygon for + * visibility test. Each ray will go from the optic center of the camera to a + * random point inside the considered polygon. + * + * \sa getGoodNbRayCastingAttemptsRatio() + * + * \return Number of rays sent. + */ unsigned int getNbRayCastingAttemptsForVisibility() { return nbRayAttempts; } /*! - Get the Ogre3D Context. - - \return A pointer on a vpAROgre instance. - */ + * Get the Ogre3D Context. + * + * \return A pointer on a vpAROgre instance. + */ vpAROgre *getOgreContext() { return ogre; } /*! 
- Get the ratio of visibility attempts that has to be successful to consider - a polygon as visible. - - \sa getNbRayCastingAttemptsForVisibility() - - \return Ratio of successful attempts that has to be considered. Value will - be between 0.0 (0%) and 1.0 (100%). - */ + * Get the ratio of visibility attempts that has to be successful to consider + * a polygon as visible. + * + * \sa getNbRayCastingAttemptsForVisibility() + * + * \return Ratio of successful attempts that has to be considered. Value will + * be between 0.0 (0%) and 1.0 (100%). + */ double getGoodNbRayCastingAttemptsRatio() { return ratioVisibleRay; } #endif @@ -170,69 +164,69 @@ template class vpMbHiddenFaces #ifdef VISP_HAVE_OGRE /*! - Tell whether if Ogre Context is initialised or not. - - \return True if it does, false otherwise. - */ + * Tell whether if Ogre Context is initialised or not. + * + * \return True if it does, false otherwise. + */ bool isOgreInitialised() { return ogreInitialised; } #endif /*! - Check if the polygon at position i in the list is visible. - - \param i : TPosition in the list. - - \return Return true if the polygon is visible. -*/ + * Check if the polygon at position i in the list is visible. + * + * \param i : TPosition in the list. + * + * \return Return true if the polygon is visible. + */ bool isVisible(unsigned int i) { return Lpol[i]->isVisible(); } #ifdef VISP_HAVE_OGRE bool isVisibleOgre(const vpTranslationVector &cameraPos, const unsigned int &index); #endif - //! operator[] as modifier. + //! Operator[] as modifier. inline PolygonType *operator[](unsigned int i) { return Lpol[i]; } - //! operator[] as reader. + //! Operator[] as reader. inline const PolygonType *operator[](unsigned int i) const { return Lpol[i]; } void reset(); #ifdef VISP_HAVE_OGRE /*! - Set the background size (by default it is 640x480). - The background size has to match with the size of the image that you are - using for the traking. 
- - \warning This function has to be called before initOgre(). - - \param h : Height of the background - \param w : Width of the background - */ + * Set the background size (by default it is 640x480). + * The background size has to match with the size of the image that you are + * using for the tracking. + * + * \warning This function has to be called before initOgre(). + * + * \param h : Height of the background + * \param w : Width of the background + */ void setBackgroundSizeOgre(const unsigned int &h, const unsigned int &w) { ogreBackground = vpImage(h, w, 0); } /*! - Set the number of rays that will be sent toward each polygon for - visibility test. Each ray will go from the optic center of the camera to a - random point inside the considered polygon. - - \sa setGoodNbRayCastingAttemptsRatio(const double &) - - \param attempts Number of rays to be sent. - */ + * Set the number of rays that will be sent toward each polygon for + * visibility test. Each ray will go from the optic center of the camera to a + * random point inside the considered polygon. + * + * \sa setGoodNbRayCastingAttemptsRatio(const double &) + * + * \param attempts Number of rays to be sent. + */ void setNbRayCastingAttemptsForVisibility(const unsigned int &attempts) { nbRayAttempts = attempts; } /*! - Set the ratio of visibility attempts that has to be successful to consider - a polygon as visible. - - \sa setNbRayCastingAttemptsForVisibility(const unsigned int &) - - \param ratio : Ratio of successful attempts that has to be considered. - Value has to be between 0.0 (0%) and 1.0 (100%). - */ + * Set the ratio of visibility attempts that has to be successful to consider + * a polygon as visible. + * + * \sa setNbRayCastingAttemptsForVisibility(const unsigned int &) + * + * \param ratio : Ratio of successful attempts that has to be considered. + * Value has to be between 0.0 (0%) and 1.0 (100%). 
+ */ void setGoodNbRayCastingAttemptsRatio(const double &ratio) { ratioVisibleRay = ratio; @@ -242,16 +236,16 @@ template class vpMbHiddenFaces ratioVisibleRay = 0.0; } /*! - Enable/Disable the appearance of Ogre config dialog on startup. - - \warning This method has only effect when Ogre is used and Ogre visibility - test is enabled using setOgreVisibilityTest() with true parameter. - - \param showConfigDialog : if true, shows Ogre dialog window (used to set - Ogre rendering options) when Ogre visibility is enabled. By default, this - functionality is turned off. - */ - inline void setOgreShowConfigDialog(bool showConfigDialog) override { ogreShowConfigDialog = showConfigDialog; } + * Enable/Disable the appearance of Ogre config dialog on startup. + * + * \warning This method has only effect when Ogre is used and Ogre visibility + * test is enabled using setOgreVisibilityTest() with true parameter. + * + * \param showConfigDialog : if true, shows Ogre dialog window (used to set + * Ogre rendering options) when Ogre visibility is enabled. By default, this + * functionality is turned off. + */ + inline void setOgreShowConfigDialog(bool showConfigDialog) { ogreShowConfigDialog = showConfigDialog; } #endif unsigned int setVisible(unsigned int width, unsigned int height, const vpCameraParameters &cam, @@ -270,16 +264,16 @@ template class vpMbHiddenFaces bool &changed); #endif /*! - Get the number of polygons. - - \return Size of the list. - */ + * Get the number of polygons. + * + * \return Size of the list. + */ inline unsigned int size() const { return (unsigned int)Lpol.size(); } }; /*! - Basic constructor. -*/ + * Basic constructor. + */ template vpMbHiddenFaces::vpMbHiddenFaces() : Lpol(), nbVisiblePolygon(0), scanlineRender() { @@ -294,8 +288,8 @@ vpMbHiddenFaces::vpMbHiddenFaces() : Lpol(), nbVisiblePolygon(0), s } /*! - Basic destructor. -*/ + * Basic destructor. 
+ */ template vpMbHiddenFaces::~vpMbHiddenFaces() { for (unsigned int i = 0; i < Lpol.size(); i++) { @@ -325,8 +319,8 @@ template vpMbHiddenFaces::~vpMbHiddenFaces() } /*! - \relates vpMbHiddenFaces -*/ + * \relates vpMbHiddenFaces + */ template vpMbHiddenFaces::vpMbHiddenFaces(const vpMbHiddenFaces ©) : Lpol(), nbVisiblePolygon(copy.nbVisiblePolygon), scanlineRender(copy.scanlineRender) @@ -360,8 +354,8 @@ template void swap(vpMbHiddenFaces &first, vpMb } /*! - Copy assignment operator. -*/ + * Copy assignment operator. + */ template vpMbHiddenFaces &vpMbHiddenFaces::operator=(vpMbHiddenFaces other) { @@ -371,10 +365,10 @@ vpMbHiddenFaces &vpMbHiddenFaces::operator=(vpMbHidden } /*! - Add a polygon to the list of polygons. - - \param p : The polygon to add. -*/ + * Add a polygon to the list of polygons. + * + * \param p : The polygon to add. + */ template void vpMbHiddenFaces::addPolygon(PolygonType *p) { PolygonType *p_new = new PolygonType; @@ -393,8 +387,8 @@ template void vpMbHiddenFaces::addPolygon(Polyg } /*! - Reset the Hidden faces (remove the list of PolygonType) -*/ + * Reset the hidden faces (remove the list of PolygonType). + */ template void vpMbHiddenFaces::reset() { nbVisiblePolygon = 0; @@ -431,12 +425,12 @@ template void vpMbHiddenFaces::reset() } /*! - Compute the clipped points of the polygons that have been added via - addPolygon(). - - \param cMo : Pose that will be used to clip the polygons. - \param cam : Camera parameters that will be used to clip the polygons. -*/ + * Compute the clipped points of the polygons that have been added via + * addPolygon(). + * + * \param cMo : Pose that will be used to clip the polygons. + * \param cam : Camera parameters that will be used to clip the polygons. + */ template void vpMbHiddenFaces::computeClippedPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam) { @@ -453,13 +447,13 @@ void vpMbHiddenFaces::computeClippedPolygons(const vpHomogeneousMat } /*! 
- Render the scene in order to perform, later via computeScanLineQuery(), - visibility tests. - - \param cam : Camera parameters that will be used to render the scene. - \param w : Width of the render window. - \param h : Height of the render window. -*/ + * Render the scene in order to perform, later via computeScanLineQuery(), + * visibility tests. + * + * \param cam : Camera parameters that will be used to render the scene. + * \param w : Width of the render window. + * \param h : Height of the render window. + */ template void vpMbHiddenFaces::computeScanLineRender(const vpCameraParameters &cam, const unsigned int &w, const unsigned int &h) @@ -487,16 +481,15 @@ void vpMbHiddenFaces::computeScanLineRender(const vpCameraParameter } /*! - Compute Scanline visibility results for a line. - - \warning computeScanLineRender() function has to be called before - - \param a : First point of the line. - \param b : Second point of the line. - \param lines : Result of the scanline visibility. List of the visible parts - of the line. \param displayResults : True if the results have to be - displayed. False otherwise -*/ + * Compute scanline visibility results for a line. + * + * \warning computeScanLineRender() function has to be called before + * + * \param a : First point of the line. + * \param b : Second point of the line. + * \param lines : Result of the scanline visibility. List of the visible parts of the line. + * \param displayResults : True if the results have to be displayed. False otherwise. + */ template void vpMbHiddenFaces::computeScanLineQuery(const vpPoint &a, const vpPoint &b, std::vector > &lines, @@ -506,19 +499,19 @@ void vpMbHiddenFaces::computeScanLineQuery(const vpPoint &a, const } /*! - Compute the number of visible polygons. 
- - \param cMo : The pose of the camera - \param angleAppears : Angle used to test the appearance of a face - \param angleDisappears : Angle used to test the disappearance of a face - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise \param useOgre : True if a Ogre is used to - test the visibility, False otherwise \param not_used : Unused parameter. - \param I : Image used to test if a face is entirely projected in the image. - \param cam : Camera parameters. - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons. + * + * \param cMo : The pose of the camera + * \param angleAppears : Angle used to test the appearance of a face + * \param angleDisappears : Angle used to test the disappearance of a face + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise + * \param useOgre : True if a Ogre is used to test the visibility, False otherwise + * \param not_used : Unused parameter. + * \param I : Image used to test if a face is entirely projected in the image. + * \param cam : Camera parameters. + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisiblePrivate(const vpHomogeneousMatrix &cMo, const double &angleAppears, const double &angleDisappears, bool &changed, bool useOgre, @@ -549,23 +542,21 @@ unsigned int vpMbHiddenFaces::setVisiblePrivate(const vpHomogeneous } /*! - Compute the visibility of a given face index. - - \param cMo : The pose of the camera - \param angleAppears : Angle used to test the appearance of a face - \param angleDisappears : Angle used to test the disappearance of a face - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise - \param useOgre : True if a Ogre is used to test the visibility, False otherwise. - \param not_used : Unused parameter. - \param width, height Image size. 
- \param cam : Camera parameters. - \param cameraPos : Position of the camera. Used only when Ogre is used as - 3rd party. - \param index : Index of the face to consider. - - \return Return true if the face is visible. -*/ + * Compute the visibility of a given face index. + * + * \param cMo : The pose of the camera + * \param angleAppears : Angle used to test the appearance of a face + * \param angleDisappears : Angle used to test the disappearance of a face + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise. + * \param useOgre : True if a Ogre is used to test the visibility, False otherwise. + * \param not_used : Unused parameter. + * \param width, height Image size. + * \param cam : Camera parameters. + * \param cameraPos : Position of the camera. Used only when Ogre is used as 3rd party. + * \param index : Index of the face to consider. + * + * \return Return true if the face is visible. + */ template bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix &cMo, const double &angleAppears, const double &angleDisappears, bool &changed, bool useOgre, @@ -653,18 +644,16 @@ bool vpMbHiddenFaces::computeVisibility(const vpHomogeneousMatrix & } /*! - Compute the number of visible polygons. - - \param width, height : Image size used to check if the region of interest is inside the - image. - \param cam : Camera parameters. - \param cMo : The pose of the camera. - \param angle : Angle used to test the appearance and disappearance of a face. - \param changed : True if a face appeared, disappeared or too many - points have been lost. False otherwise. - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons. + * + * \param width, height : Image size used to check if the region of interest is inside the image. + * \param cam : Camera parameters. + * \param cMo : The pose of the camera. + * \param angle : Angle used to test the appearance and disappearance of a face. 
+ * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise. + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisible(unsigned int width, unsigned int height, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, @@ -674,19 +663,17 @@ unsigned int vpMbHiddenFaces::setVisible(unsigned int width, unsign } /*! - Compute the number of visible polygons. - - \param width, height : Image size used to check if the region of interest is inside the - image. - \param cam : Camera parameters. - \param cMo : The pose of the camera. - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise. - \param angleAppears : Angle used to test the appearance of a face. - \param angleDisappears : Angle used to test the disappearance of a face. - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons. + * + * \param width, height : Image size used to check if the region of interest is inside the image. + * \param cam : Camera parameters. + * \param cMo : The pose of the camera. + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise. + * \param angleAppears : Angle used to test the appearance of a face. + * \param angleDisappears : Angle used to test the disappearance of a face. + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisible(unsigned int width, unsigned int height, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, @@ -697,16 +684,15 @@ unsigned int vpMbHiddenFaces::setVisible(unsigned int width, unsign } /*! - Compute the number of visible polygons. 
- - \param cMo : The pose of the camera - \param angleAppears : Angle used to test the appearance of a face - \param angleDisappears : Angle used to test the disappearance of a face - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons. + * + * \param cMo : The pose of the camera + * \param angleAppears : Angle used to test the appearance of a face + * \param angleDisappears : Angle used to test the disappearance of a face + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisible(const vpHomogeneousMatrix &cMo, const double &angleAppears, const double &angleDisappears, bool &changed) @@ -716,10 +702,10 @@ unsigned int vpMbHiddenFaces::setVisible(const vpHomogeneousMatrix #ifdef VISP_HAVE_OGRE /*! - Initialise the ogre context for face visibility tests. - - \param cam : Camera parameters. -*/ + * Initialise the ogre context for face visibility tests. + * + * \param cam : Camera parameters. + */ template void vpMbHiddenFaces::initOgre(const vpCameraParameters &cam) { ogreInitialised = true; @@ -748,10 +734,10 @@ template void vpMbHiddenFaces::initOgre(const v } /*! - Update the display in Ogre Window. - - \param cMo : Pose used to display. -*/ + * Update the display in Ogre Window. + * + * \param cMo : Pose used to display. + */ template void vpMbHiddenFaces::displayOgre(const vpHomogeneousMatrix &cMo) { if (ogreInitialised && !ogre->isWindowHidden()) { @@ -767,19 +753,17 @@ template void vpMbHiddenFaces::displayOgre(cons } /*! - Compute the number of visible polygons through Ogre3D. - - \param width, height : Image size used to check if the region of interest is inside the - image. - \param cam : Camera parameters. - \param cMo : The pose of the camera. 
- \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise. - \param angleAppears : Angle used to test the appearance of a face. - \param angleDisappears : Angle used to test the disappearance of a face. - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons through Ogre3D. + * + * \param width, height : Image size used to check if the region of interest is inside the image. + * \param cam : Camera parameters. + * \param cMo : The pose of the camera. + * \param changed : True if a face appeared, disappeared or too many points have been lost. False otherwise. + * \param angleAppears : Angle used to test the appearance of a face. + * \param angleDisappears : Angle used to test the disappearance of a face. + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisibleOgre(unsigned int width, unsigned int height, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, @@ -790,16 +774,15 @@ unsigned int vpMbHiddenFaces::setVisibleOgre(unsigned int width, un } /*! - Compute the number of visible polygons through Ogre3D. - - \param cMo : The pose of the camera - \param angleAppears : Angle used to test the appearance of a face - \param angleDisappears : Angle used to test the disappearance of a face - \param changed : True if a face appeared, disappeared or too many points - have been lost. False otherwise - - \return Return the number of visible polygons -*/ + * Compute the number of visible polygons through Ogre3D. + * + * \param cMo : The pose of the camera + * \param angleAppears : Angle used to test the appearance of a face + * \param angleDisappears : Angle used to test the disappearance of a face + * \param changed : True if a face appeared, disappeared or too many points have been lost. 
False otherwise + * + * \return Return the number of visible polygons + */ template unsigned int vpMbHiddenFaces::setVisibleOgre(const vpHomogeneousMatrix &cMo, const double &angleAppears, const double &angleDisappears, bool &changed) @@ -808,13 +791,13 @@ unsigned int vpMbHiddenFaces::setVisibleOgre(const vpHomogeneousMat } /*! - Test the visibility of a polygon through Ogre3D via RayCasting. - - \param cameraPos : Position of the camera in the 3D world. - \param index : Index of the polygon. - - \return Return true if the polygon is visible, False otherwise. -*/ + * Test the visibility of a polygon through Ogre3D via RayCasting. + * + * \param cameraPos : Position of the camera in the 3D world. + * \param index : Index of the polygon. + * + * \return Return true if the polygon is visible, False otherwise. + */ template bool vpMbHiddenFaces::isVisibleOgre(const vpTranslationVector &cameraPos, const unsigned int &index) { From bbe3cb837340aacd6ac60afa46e8903adbfd3534 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Wed, 25 Oct 2023 13:59:22 +0200 Subject: [PATCH 06/14] New override fixes --- .../tracking/forward-projection/vpPoint.cpp | 94 +++++++++---------- .../include/visp3/mbt/vpMbGenericTracker.h | 7 +- .../mbt/include/visp3/mbt/vpMbKltTracker.h | 2 +- 3 files changed, 49 insertions(+), 54 deletions(-) diff --git a/modules/core/src/tracking/forward-projection/vpPoint.cpp b/modules/core/src/tracking/forward-projection/vpPoint.cpp index 75e7949761..efbec9be21 100644 --- a/modules/core/src/tracking/forward-projection/vpPoint.cpp +++ b/modules/core/src/tracking/forward-projection/vpPoint.cpp @@ -132,13 +132,15 @@ void vpPoint::setWorldCoordinates(const vpColVector &oP_) oP[1] = oP_[1]; oP[2] = oP_[2]; oP[3] = 1.; - } else if (oP_.size() == 4) { + } + else if (oP_.size() == 4) { oP[0] = oP_[0]; oP[1] = oP_[1]; oP[2] = oP_[2]; oP[3] = oP_[3]; oP /= oP[3]; - } else { + } + else { throw(vpException(vpException::dimensionError, "Cannot initialize vpPoint from 
vector with size %d", oP_.size())); } } @@ -160,13 +162,15 @@ void vpPoint::setWorldCoordinates(const std::vector &oP_) oP[1] = oP_[1]; oP[2] = oP_[2]; oP[3] = 1.; - } else if (oP_.size() == 4) { + } + else if (oP_.size() == 4) { oP[0] = oP_[0]; oP[1] = oP_[1]; oP[2] = oP_[2]; oP[3] = oP_[3]; oP /= oP[3]; - } else { + } + else { throw(vpException(vpException::dimensionError, "Cannot initialize vpPoint from vector with size %d", oP_.size())); } } @@ -283,36 +287,36 @@ void vpPoint::changeFrame(const vpHomogeneousMatrix &cMo) frame are set to the same coordinates than the one in the camera frame. */ const vpPoint -operator*(const vpHomogeneousMatrix &aMb, const vpPoint& bP) +operator*(const vpHomogeneousMatrix &aMb, const vpPoint &bP) { - vpPoint aP ; + vpPoint aP; - vpColVector v(4),v1(4) ; + vpColVector v(4), v1(4); - v[0] = bP.get_X() ; - v[1] = bP.get_Y() ; - v[2] = bP.get_Z() ; - v[3] = bP.get_W() ; + v[0] = bP.get_X(); + v[1] = bP.get_Y(); + v[2] = bP.get_Z(); + v[3] = bP.get_W(); - v1[0] = aMb[0][0]*v[0] + aMb[0][1]*v[1]+ aMb[0][2]*v[2]+ aMb[0][3]*v[3] ; - v1[1] = aMb[1][0]*v[0] + aMb[1][1]*v[1]+ aMb[1][2]*v[2]+ aMb[1][3]*v[3] ; - v1[2] = aMb[2][0]*v[0] + aMb[2][1]*v[1]+ aMb[2][2]*v[2]+ aMb[2][3]*v[3] ; - v1[3] = aMb[3][0]*v[0] + aMb[3][1]*v[1]+ aMb[3][2]*v[2]+ aMb[3][3]*v[3] ; + v1[0] = aMb[0][0]*v[0] + aMb[0][1]*v[1]+ aMb[0][2]*v[2]+ aMb[0][3]*v[3]; + v1[1] = aMb[1][0]*v[0] + aMb[1][1]*v[1]+ aMb[1][2]*v[2]+ aMb[1][3]*v[3]; + v1[2] = aMb[2][0]*v[0] + aMb[2][1]*v[1]+ aMb[2][2]*v[2]+ aMb[2][3]*v[3]; + v1[3] = aMb[3][0]*v[0] + aMb[3][1]*v[1]+ aMb[3][2]*v[2]+ aMb[3][3]*v[3]; - v1 /= v1[3] ; + v1 /= v1[3]; // v1 = M*v ; - aP.set_X(v1[0]) ; - aP.set_Y(v1[1]) ; - aP.set_Z(v1[2]) ; - aP.set_W(v1[3]) ; + aP.set_X(v1[0]); + aP.set_Y(v1[1]); + aP.set_Z(v1[2]); + aP.set_W(v1[3]); - aP.set_oX(v1[0]) ; - aP.set_oY(v1[1]) ; - aP.set_oZ(v1[2]) ; - aP.set_oW(v1[3]) ; + aP.set_oX(v1[0]); + aP.set_oY(v1[1]); + aP.set_oZ(v1[2]); + aP.set_oW(v1[3]); - return aP ; + return aP; } 
/*! @@ -325,25 +329,25 @@ operator*(const vpHomogeneousMatrix &aMb, const vpPoint& bP) \return A point with 2D coordinates in the image plane a. */ const vpPoint -operator*(const vpHomography &aHb, const vpPoint& bP) +operator*(const vpHomography &aHb, const vpPoint &bP) { - vpPoint aP ; - vpColVector v(3),v1(3) ; + vpPoint aP; + vpColVector v(3), v1(3); - v[0] = bP.get_x() ; - v[1] = bP.get_y() ; - v[2] = bP.get_w() ; + v[0] = bP.get_x(); + v[1] = bP.get_y(); + v[2] = bP.get_w(); - v1[0] = aHb[0][0]*v[0] + aHb[0][1]*v[1]+ aHb[0][2]*v[2] ; - v1[1] = aHb[1][0]*v[0] + aHb[1][1]*v[1]+ aHb[1][2]*v[2] ; - v1[2] = aHb[2][0]*v[0] + aHb[2][1]*v[1]+ aHb[2][2]*v[2] ; + v1[0] = aHb[0][0]*v[0] + aHb[0][1]*v[1]+ aHb[0][2]*v[2]; + v1[1] = aHb[1][0]*v[0] + aHb[1][1]*v[1]+ aHb[1][2]*v[2]; + v1[2] = aHb[2][0]*v[0] + aHb[2][1]*v[1]+ aHb[2][2]*v[2]; // v1 = M*v ; - aP.set_x(v1[0]) ; - aP.set_y(v1[1]) ; - aP.set_w(v1[2]) ; + aP.set_x(v1[0]); + aP.set_y(v1[1]); + aP.set_w(v1[2]); - return aP ; + return aP; } #endif //! For memory issue (used by the vpServo class only). @@ -402,20 +406,6 @@ void vpPoint::display(const vpImage &I, const vpHomogeneousMatrix &cMo, vpFeatureDisplay::displayPoint(_p[0], _p[1], cam, I, color, thickness); } -VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpPoint & /* vpp */) { return (os << "vpPoint"); } - -#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) -vpPoint &vpPoint::operator=(const vpPoint &vpp) -{ - p = vpp.p; - cP = vpp.cP; - oP = vpp.oP; - cPAvailable = vpp.cPAvailable; - - return *this; -} -#endif - /*! * Display the projection of a 3D point in image \e I. 
* diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h index b576899404..bdd129c628 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbGenericTracker.h @@ -782,8 +782,13 @@ class VISP_EXPORT vpMbGenericTracker : public vpMbTracker #endif using vpMbDepthDenseTracker::setPose; #endif - virtual void setPose(const vpImage *const I, const vpImage *const I_color, +#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) + virtual void setPose(const vpImage *I, const vpImage *I_color, const vpHomogeneousMatrix &cdMo) override; +#else + virtual void setPose(const vpImage *I, const vpImage *I_color, + const vpHomogeneousMatrix &cdMo); +#endif }; #ifdef VISP_HAVE_NLOHMANN_JSON friend void to_json(nlohmann::json &j, const TrackerWrapper &t); diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h index 448f7b4bc3..6bb6556938 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h @@ -468,7 +468,7 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker void preTracking(const vpImage &I); bool postTracking(const vpImage &I, vpColVector &w); virtual void reinit(const vpImage &I); - virtual void setPose(const vpImage *const I, const vpImage *const I_color, + virtual void setPose(const vpImage *I, const vpImage *I_color, const vpHomogeneousMatrix &cdMo); //@} }; From bf36aa624d13b10bed4b1bfdbdd5d16be9d33f84 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Wed, 25 Oct 2023 18:36:22 +0200 Subject: [PATCH 07/14] Revert back changes in testImageCircle.cpp to see if test pass on ci --- .../test/tools/geometry/testImageCircle.cpp | 308 +++++++++--------- 1 file changed, 153 insertions(+), 155 deletions(-) diff --git 
a/modules/core/test/tools/geometry/testImageCircle.cpp b/modules/core/test/tools/geometry/testImageCircle.cpp index 66da386f36..b029a3a212 100644 --- a/modules/core/test/tools/geometry/testImageCircle.cpp +++ b/modules/core/test/tools/geometry/testImageCircle.cpp @@ -1,4 +1,5 @@ -/* +/**************************************************************************** + * * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -28,8 +29,9 @@ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Description: - * Test vpImageCircle. - */ + * Test vpRect. + * +*****************************************************************************/ #include #include @@ -45,12 +47,11 @@ bool compareAngles(const float &actualVal, const float &theoreticalVal) float ensureIsBetweenMinPiAndPi(const float &theta) { float theta1 = theta; - float pi = static_cast(M_PI); - if (theta1 > pi) { - theta1 -= 2.0 * pi; + if (theta1 > M_PI) { + theta1 -= 2.0 * M_PI; } - else if (theta1 < -pi) { - theta1 += 2.0 * pi; + else if (theta1 < -M_PI) { + theta1 += 2.0 * M_PI; } return theta1; } @@ -66,15 +67,12 @@ int main() const float HEIGHT_SWITCHED = WIDTH; // The RoI must be inverted in order to cross left and right axes while crossing only the top axis vpRect switchedRoI(OFFSET, OFFSET, WIDTH_SWITCHED, HEIGHT_SWITCHED); bool hasSucceeded = true; - float pi = static_cast(M_PI); - float pi_2 = static_cast(M_PI_2); - float pi_4 = static_cast(M_PI_4); // Test with no intersections { vpImageCircle circle(vpImagePoint(HEIGHT / 2.f, WIDTH / 2.f), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -96,7 +94,7 @@ int main() vpRect roiSquare(OFFSET, OFFSET, HEIGHT, HEIGHT); vpImageCircle 
circle(vpImagePoint(OFFSET + HEIGHT / 2.f, OFFSET + HEIGHT / 2.f), HEIGHT / 2.f); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * HEIGHT / 2.f; + float theoreticalValue = 2.f * M_PI * HEIGHT / 2.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -121,7 +119,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * pi * RADIUS /3.f; + float theoreticalValue = 4.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -146,7 +144,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS /3.f; + float theoreticalValue = 2.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -171,7 +169,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -196,7 +194,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * pi * RADIUS /3.f; + float theoreticalValue = 4.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -221,7 +219,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float 
arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS /3.f; + float theoreticalValue = 2.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -246,7 +244,7 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -267,12 +265,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = pi / 3.f; + float theta = M_PI / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * pi * RADIUS /3.f; + float theoreticalValue = 5.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -293,12 +291,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = -2.f * pi/3.f; + float theta = -2.f * M_PI/3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = pi * RADIUS /3.f; + float theoreticalValue = M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -319,12 +317,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = pi_2; + float theta = M_PI_2; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, 
uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -345,12 +343,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = -pi / 3.f; + float theta = -M_PI / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * pi * RADIUS /3.f; + float theoreticalValue = 5.f * M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -371,12 +369,12 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = pi / 3.f; + float theta = M_PI / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = pi * RADIUS /3.f; + float theoreticalValue = M_PI * RADIUS /3.f; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -400,7 +398,7 @@ int main() float vc = OFFSET + HEIGHT - RADIUS; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * pi * RADIUS; + float theoreticalValue = 2.f * M_PI * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -427,7 +425,7 @@ int main() float vc = OFFSET; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = pi_2 * RADIUS; + 
float theoreticalValue = M_PI_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -453,13 +451,13 @@ int main() // (4): umin = uc + r cos(theta_v_max) ; v_cross_max = vc - r sin(theta_v_max) >= vmin && <= vmin + height // (3) & (4) => uc = umin - r cos(theta_v_min) = umin - r cos(theta_v_max) <=> theta_v_min = - theta_v_max // (3) & (4) => vc >= vmin + r sin(theta_v_min) && vc >= vmin + r sin (theta_v_max) - float theta_v_min = pi / 4.f; + float theta_v_min = M_PI / 4.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + RADIUS * std::sin(theta_v_min) + 1.f; vc = std::max(vc, OFFSET + RADIUS * std::sin(-theta_v_min) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = pi_2 * RADIUS; + float theoreticalValue = M_PI_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -487,13 +485,13 @@ int main() // (1) => uc + r cos(theta_u_top_min) >= umin <=> uc >= umin - r cos(theta_u_top_min) // (2) => uc + r cos(theta_u_top_max) >= umin <=> uc >= umin - r cos(theta_u_top_max) - float theta_u_top_min = -1.1f * pi_2; + float theta_u_top_min = -1.1f * M_PI_2; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos(pi - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 0.2f * pi_2 * RADIUS; + float theoreticalValue = 0.2f * M_PI_2 * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -523,10 +521,10 @@ int main() // (3) & (4) =>{ uc = umin - r cos(theta_v_min) & 
{ uc = umin - r cos(- theta_v_min) // (3) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = M_PI - theta_u_top_min; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos(pi - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET - uc)/RADIUS); theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); @@ -566,13 +564,13 @@ int main() // (1) => vc = vmin + r sin(theta_u_top_min) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_top_min = 2.f * pi / 3.f; - float theta_v_max = -pi_2; + float theta_u_top_min = 2.f * M_PI / 3.f; + float theta_v_max = -M_PI_2; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; + float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -601,13 +599,13 @@ int main() // (1) <=> asin((vc - vmin)/r) >= acos[(umin + width - uc)/r] <=> vc >= r sin(acos[(umin + width - uc)/r]) + vmin // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_max = -7.f * pi / 8.f; + float theta_v_max = -7.f * M_PI / 8.f; float theta_v_min = -theta_v_max; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = RADIUS * std::sin(std::acos((OFFSET + WIDTH - uc)/RADIUS)) + OFFSET + 1.f; vpImageCircle circle(vpImagePoint(vc, 
uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * RADIUS; + float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -636,8 +634,8 @@ int main() // Choice: theta_u_top_min = -0.9 * PI / 2 // (1) => vc = vmin + r sin(theta_u_top_min) // (2) vc - r sin(theta_v_min) <= vmin => asin((vc - vmin)/r) <= theta_v_min - float theta_u_top_min = -0.9f * pi_2; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = -0.9f * M_PI_2; + float theta_u_top_max = M_PI - theta_u_top_min; theta_u_top_max = ensureIsBetweenMinPiAndPi(theta_u_top_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::asin((vc - OFFSET)/RADIUS) + 1.f; @@ -674,10 +672,10 @@ int main() // (2) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (2) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = M_PI - theta_u_top_min; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_u_top_min) - 1.f; - uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos(pi - theta_u_top_min) - 1.f); + uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI - theta_u_top_min) - 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET + WIDTH - uc)/RADIUS); theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); @@ -689,7 +687,7 @@ int main() } vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; + float theoreticalValue = (2.f * M_PI 
- ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -717,13 +715,13 @@ int main() // (3) => vc = vmin + height + r sin(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = pi_2; - float theta_u_bot_max = -pi / 3.f; + float theta_v_min = M_PI_2; + float theta_u_bot_max = -M_PI / 3.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_max);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; + float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -753,7 +751,7 @@ int main() // (4) => vc <= vmin + height + r sin(theta_v_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = pi_4 / 2.f; + float theta_v_min = M_PI_4 / 2.f; float theta_v_max = -theta_v_min; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(theta_v_max) - 1.f); @@ -789,8 +787,8 @@ int main() // (1) => uc >= umin - r cos(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 5.f * pi_4 / 2.f; - float theta_u_bot_max = pi - theta_u_bot_min; + float theta_u_bot_min = 5.f * M_PI_4 / 2.f; + float theta_u_bot_max = M_PI - theta_u_bot_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = std::max(OFFSET - RADIUS * std::cos(theta_u_bot_min) + 1.f, OFFSET - RADIUS * std::cos(theta_u_bot_max) + 1.f); vpImageCircle 
circle(vpImagePoint(vc, uc), RADIUS); @@ -825,10 +823,10 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -5.f * pi / 8.f; - float theta_u_bot_max = pi - theta_u_bot_min; + float theta_u_bot_min = -5.f * M_PI / 8.f; + float theta_u_bot_max = M_PI - theta_u_bot_min; theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_min = 7.f * pi / 8.f; + float theta_v_min = 7.f * M_PI / 8.f; float theta_v_max = -theta_v_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); @@ -862,13 +860,13 @@ int main() // (1) => vc = vmin + height + r sin(theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -2.f * pi / 3.f; - float theta_v_min = pi_2; + float theta_u_bot_min = -2.f * M_PI / 3.f; + float theta_v_min = M_PI_2; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi_2 + pi / 3.f) * RADIUS; + float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -897,12 +895,12 @@ int main() // (2) & (4) => vc <= vmin + height + r sin(theta_v_min) & vc <= vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = 5.f * pi / 6.f; + float theta_v_min = 5.f * M_PI / 6.f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * 
std::sin(-theta_v_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f + float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -931,12 +929,12 @@ int main() // (1) & (3) => uc < umin + width - r cos(theta_u_bot_min) & uc <= umin + width - r cos(PI - theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 4.f * pi / 6.f; + float theta_u_bot_min = 4.f * M_PI / 6.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); - float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)pi -theta_u_bot_min) - 1.f); + float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI -theta_u_bot_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (pi / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f + float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -966,16 +964,16 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -7.f * pi / 8.f; - float theta_u_bot_max = pi - theta_u_bot_min; + float theta_u_bot_min = -7.f * M_PI / 8.f; + float theta_u_bot_max = M_PI - theta_u_bot_min; theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_max = -3.f * pi / 8.f; + float theta_v_max = -3.f * M_PI 
/ 8.f; float theta_v_min = -theta_v_max; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; + float theoreticalValue = (2.f * M_PI - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1004,12 +1002,12 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = 3.f * pi / 8.f; - float theta_v_min = 7.f * pi / 8.f; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = 3.f * M_PI / 8.f; + float theta_v_min = 7.f * M_PI / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * pi / 8.f; - float theta_u_bottom_max = -3.f * pi / 8.f; + float theta_u_bottom_min = -5.f * M_PI / 8.f; + float theta_u_bottom_max = -3.f * M_PI / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET - radius * std::cos(theta_v_min); @@ -1044,9 +1042,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = pi / 6.f; - float theta_u_top_min = pi - theta_u_top_max; - float theta_v_min = pi / 3.f; + float theta_u_top_max = M_PI / 6.f; + float theta_u_top_min = M_PI - theta_u_top_max; + float theta_v_min = M_PI / 3.f; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1082,9 +1080,9 @@ int main() // 
(1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * pi / 6.f; - float theta_u_top_max = pi - theta_u_top_min; - float theta_v_min = pi; + float theta_u_top_min = 4.f * M_PI / 6.f; + float theta_u_top_max = M_PI - theta_u_top_min; + float theta_v_min = M_PI; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT / (2.f * std::sin(theta_u_top_min)); // vmin + h - vmin = (vc - r sin(-theta_u_top_min)) - (vc - r sin(theta_top_min)) @@ -1092,7 +1090,7 @@ int main() float uc = OFFSET - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1121,8 +1119,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi_2; - float theta_v_min = pi_4; + float theta_u_top_min = M_PI_2; + float theta_v_min = M_PI_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1158,8 +1156,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi_2; - float theta_v_min = 3.f * pi_4; + float theta_u_top_min = M_PI_2; + float theta_v_min = 3.f * M_PI_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; 
float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1196,8 +1194,8 @@ int main() // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max float theta_u_top_max = 0.f; - float theta_u_bot_max = -pi / 3.f; - float theta_v_max = -pi / 6.f; + float theta_u_bot_max = -M_PI / 3.f; + float theta_v_max = -M_PI / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1232,9 +1230,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = pi / 3.f; + float theta_u_top_max = M_PI / 3.f; float theta_u_bot_max = 0.f; - float theta_v_min = pi / 6.f; + float theta_v_min = M_PI / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1269,18 +1267,18 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = 3.f * pi / 8.f; - float theta_v_min = 1.f * pi / 8.f; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = 3.f * M_PI / 8.f; + float theta_v_min = 1.f * M_PI / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * pi / 8.f; - float theta_u_bottom_max = -3.f * pi / 8.f; + float theta_u_bottom_min = -5.f * M_PI / 8.f; + float theta_u_bottom_max = -3.f * M_PI / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = 
circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1309,15 +1307,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * pi / 6.f; - float theta_v_min = 2.f * pi / 3.f; + float theta_u_top_min = 5.f * M_PI / 6.f; + float theta_v_min = 2.f * M_PI / 3.f; float theta_u_bottom_min = -theta_u_top_min; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_u_top_min - theta_u_bottom_min)) * radius; + float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_u_bottom_min)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1346,8 +1344,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * pi / 6.f; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = 4.f * M_PI / 6.f; + float theta_u_top_max = M_PI - theta_u_top_min; float theta_v_min = 0; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; @@ -1356,7 +1354,7 @@ int main() float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle 
circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1385,15 +1383,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi_2; - float theta_v_min = 3.f * pi_4; + float theta_u_top_min = M_PI_2; + float theta_v_min = 3.f * M_PI_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_v_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1422,15 +1420,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi_2; - float theta_v_min = pi_4; + float theta_u_top_min = M_PI_2; + float theta_v_min = M_PI_4; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - 
(theta_v_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1459,15 +1457,15 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = pi; - float theta_u_bot_min = -2.f * pi / 3.f; - float theta_v_max = -5.f * pi / 6.f; + float theta_u_top_min = M_PI; + float theta_u_bot_min = -2.f * M_PI / 3.f; + float theta_v_max = -5.f * M_PI / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - (theta_u_top_min - theta_v_max)) * radius; + float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_v_max)) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1496,9 +1494,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * pi / 3.f; - float theta_u_bot_min = pi; - float theta_v_min = 5.f * pi / 6.f; + float theta_u_top_min = 2.f * M_PI / 3.f; + float theta_u_bot_min = M_PI; + float theta_v_min = 5.f * M_PI / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1535,12 +1533,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 
7.f * pi / 8.f; + float theta_v_left_min = 7.f * M_PI / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = pi / 8.f; + float theta_v_right_min = M_PI / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_top_min = 5.f * pi / 8.f; - float theta_u_top_max = pi - theta_u_top_min; + float theta_u_top_min = 5.f * M_PI / 8.f; + float theta_u_top_max = M_PI - theta_u_top_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1576,12 +1574,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * pi / 3.f; + float theta_u_top_min = -2.f * M_PI / 3.f; float uc = OFFSET + WIDTH_SWITCHED/2.f; float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); - float theoreticalValue = (pi/3.f) * RADIUS; + float theoreticalValue = (M_PI/3.f) * RADIUS; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1612,9 +1610,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_max = -5.f * pi / 8.f; - float theta_v_right_max = -3.f *pi / 8.f; - float theta_u_top_min = -7.f * pi / 8.f; + float theta_v_left_max = -5.f * M_PI / 8.f; + float theta_v_right_max = -3.f *M_PI / 8.f; + float theta_u_top_min = -7.f * M_PI / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET - radius * std::cos(theta_v_left_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1651,9 +1649,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) 
theta_v_right_min = - theta_v_right_max - float theta_u_top_max = -pi / 3.f; + float theta_u_top_max = -M_PI / 3.f; float theta_v_right_max = 0.f; - float theta_v_left_max = -pi_2; + float theta_v_left_max = -M_PI_2; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET; float vc = OFFSET + radius * std::sin(theta_u_top_max); @@ -1690,9 +1688,9 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * pi / 3.f; - float theta_v_left_max = pi; - float theta_v_right_max = -pi_2; + float theta_u_top_min = -2.f * M_PI / 3.f; + float theta_v_left_max = M_PI; + float theta_v_right_max = -M_PI_2; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1729,12 +1727,12 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * pi / 8.f; + float theta_v_left_min = 7.f * M_PI / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = pi / 8.f; + float theta_v_right_min = M_PI / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_bot_min = -5.f * pi / 8.f; - float theta_u_bot_max = -pi - theta_u_bot_min; + float theta_u_bot_min = -5.f * M_PI / 8.f; + float theta_u_bot_max = -M_PI - theta_u_bot_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1770,10 +1768,10 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * pi / 3.f; - float 
theta_u_bot_max = pi - theta_u_bot_min; - float theta_v_left_min = 5.f * pi / 6.f; - float theta_v_right_min = pi / 6.f; + float theta_u_bot_min = 2.f * M_PI / 3.f; + float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_v_left_min = 5.f * M_PI / 6.f; + float theta_v_right_min = M_PI / 6.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1809,9 +1807,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 7.f * pi / 8.f; - float theta_v_left_min = 5.f * pi / 8.f; - float theta_v_right_min = 3.f * pi / 8.f; + float theta_u_bot_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 5.f * M_PI / 8.f; + float theta_v_right_min = 3.f * M_PI / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1847,8 +1845,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_max = pi / 3.f; - float theta_v_left_min = pi_2; + float theta_u_bot_max = M_PI / 3.f; + float theta_v_left_min = M_PI_2; float theta_v_right_min = 0.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET; @@ -1885,9 +1883,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * pi / 3.f; - float theta_v_right_min = pi_2; - float theta_v_left_min = 
pi; + float theta_u_bot_min = 2.f * M_PI / 3.f; + float theta_v_right_min = M_PI_2; + float theta_v_left_min = M_PI; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); @@ -1919,17 +1917,17 @@ int main() // (6): u_cross_bot_max = uc + r cos(theta_u_bottom_max) <= umin_roi + width ; vmin_roi + height = vc - r sin(theta_u_bottom_max) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * pi / 3.f; - float theta_u_top_max = pi / 3.f; - float theta_u_bottom_min = -2.f * pi / 3.f; - float theta_u_bottom_max = -pi / 3.f; + float theta_u_top_min = 2.f * M_PI / 3.f; + float theta_u_top_max = M_PI / 3.f; + float theta_u_bottom_min = -2.f * M_PI / 3.f; + float theta_u_bottom_max = -M_PI / 3.f; float uc = OFFSET + WIDTH / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * pi - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { @@ -1955,9 +1953,9 @@ int main() // (6): u_min + width = uc + r cos(theta_v_right_max); v_cross_right_max = vc - r sin(theta_v_right_max) // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 5.f * pi / 6.f; + float theta_v_left_min = 5.f * M_PI / 6.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = pi / 6.f; + float theta_v_right_min = M_PI / 6.f; float 
theta_v_right_max = -theta_v_right_min; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + WIDTH / 2.f; @@ -1987,14 +1985,14 @@ int main() // Choosing theta_v_left_min = 7 PI / 8 and circle at the center of the RoI // umin = uc + r cos(theta_v_left_min) => r = (umin - uc) / cos(theta_v_left_min) vpRect squareRoI(OFFSET, OFFSET, HEIGHT, HEIGHT); - float theta_v_left_min = 7.f * pi / 8.f; + float theta_v_left_min = 7.f * M_PI / 8.f; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = (OFFSET - uc) / std::cos(theta_v_left_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(squareRoI); - float theoreticalValue = pi * radius; + float theoreticalValue = M_PI * radius; bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { From 2e3a5ab906146983f6d2c4a7b83961a629b996ca Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 12:19:46 +0200 Subject: [PATCH 08/14] Fix testImageCircle.cpp since introduction of (float)M_PI using M_PIf macro - The arc angle corresponding to the part of the circle that is inside a roi could be negative - Fix consists in adding 4*pi when negative --- .../core/include/visp3/core/vpImageCircle.h | 25 +- modules/core/include/visp3/core/vpMath.h | 60 ++- modules/core/src/image/vpImageCircle.cpp | 117 ++--- .../test/tools/geometry/testImageCircle.cpp | 439 +++++++++--------- 4 files changed, 324 insertions(+), 317 deletions(-) diff --git a/modules/core/include/visp3/core/vpImageCircle.h b/modules/core/include/visp3/core/vpImageCircle.h index 33343e1f75..17fefa8b03 100644 --- a/modules/core/include/visp3/core/vpImageCircle.h +++ b/modules/core/include/visp3/core/vpImageCircle.h @@ -32,9 +32,9 @@ */ /*! - \file vpImageCircle.h - \brief Image circle, i.e. circle in the image space. -*/ + * \file vpImageCircle.h + * \brief Image circle, i.e. circle in the image space. 
+ */ #ifndef _vpImageCircle_h_ #define _vpImageCircle_h_ @@ -50,8 +50,8 @@ #endif /** - * \brief Class that defines a 2D circle in an image. - */ + * \brief Class that defines a 2D circle in an image. + */ class VISP_EXPORT vpImageCircle { public: @@ -72,28 +72,25 @@ class VISP_EXPORT vpImageCircle vpImageCircle(const cv::Vec3f &vec); #endif - /*! - * Default destructor. - */ - virtual ~vpImageCircle(); - /*! * Compute the angular coverage, in terms of radians, that is contained in the Region of Interest (RoI). - * \sa \ref vpImageCircle::computeArcLengthInRoI() "vpImageCircle::computeArcLengthInRoI(const vpRect &roi)" + * \sa computeArcLengthInRoI(), computeArcLengthInRoI(const vpRect &roi) * \param[in] roi The rectangular RoI in which we want to know the number of pixels of the circle that are contained. - * \return Returns 2.f * M_PI for a circle that is fully visible in the RoI, or the sum of the angles of the arc(s) that is(are) visible in the RoI. + * \return Returns angular coverage of a circle in a ROI as an angle value in radians. + * More precisely, it returns 2.f * M_PI for a circle that is fully visible in the RoI, or the sum of the angles + * of the arc(s) that is(are) visible in the RoI. */ float computeAngularCoverageInRoI(const vpRect &roi) const; /*! * Compute the arc length, in terms of number of pixels, that is contained in the Region of Interest (RoI). - * \sa \ref vpImageCircle::computeAngularCoverageInRoI() "vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi)" + * \sa computeAngularCoverageInRoI(), computeAngularCoverageInRoI(const vpRect &roi) * \param[in] roi The rectangular RoI in which we want to know the number of pixels of the circle that are contained. * \return The number of pixels of the circle that are contained in the RoI. */ float computeArcLengthInRoI(const vpRect &roi) const; - /*! + /*! * Get the center of the image (2D) circle * \return The center of the image (2D) circle. 
*/ diff --git a/modules/core/include/visp3/core/vpMath.h b/modules/core/include/visp3/core/vpMath.h index 237b2decfb..2b56d70061 100644 --- a/modules/core/include/visp3/core/vpMath.h +++ b/modules/core/include/visp3/core/vpMath.h @@ -77,6 +77,19 @@ #endif +#endif + +#ifndef M_PIf +#define M_PIf 3.14159265358979323846f +#endif + +#ifndef M_PI_2f +#define M_PI_2f (M_PIf / 2.0f) +#endif + +#ifndef M_PI_4f +#define M_PI_4f (M_PIf / 4.0f) + #include #include @@ -88,12 +101,11 @@ class vpRxyzVector; class vpTranslationVector; /*! - \class vpMath - \ingroup group_core_math_tools - \brief Provides simple mathematics computation tools that are not - available in the C mathematics library (math.h) - -*/ + * \class vpMath + * \ingroup group_core_math_tools + * \brief Provides simple mathematics computation tools that are not + * available in the C mathematics library (math.h) + */ class VISP_EXPORT vpMath { public: @@ -117,6 +129,42 @@ class VISP_EXPORT vpMath static vpColVector rad(const vpColVector &r); + /*! + * Convert angle between \f$-\pi\f$ and \f$\pi\f$. + * + * \param[in] theta The input angle we want to ensure it is in the interval \f$[-\pi ; \pi]\f$. + * \return The corresponding angle in the interval \f$[-\pi ; \pi]\f$. + */ + static float getAngleBetweenMinPiAndPi(const float &theta) + { + float theta1 = theta; + if (theta1 > M_PIf) { + theta1 -= 2.0f * M_PIf; + } + else if (theta1 < -M_PIf) { + theta1 += 2.0f * M_PIf; + } + return theta1; + } + + /*! + * Convert angle between \f$-\pi\f$ and \f$\pi\f$. + * + * \param[in] theta The input angle we want to ensure it is in the interval \f$[-\pi ; \pi]\f$. + * \return The corresponding angle in the interval \f$[-\pi ; \pi]\f$. + */ + static double getAngleBetweenMinPiAndPi(const double &theta) + { + double theta1 = theta; + if (theta1 > M_PI) { + theta1 -= 2.0 * M_PI; + } + else if (theta1 < -M_PI) { + theta1 += 2.0 * M_PI; + } + return theta1; + } + /*! Compute x square value. \return Square value \f$ x^2 \f$. 
diff --git a/modules/core/src/image/vpImageCircle.cpp b/modules/core/src/image/vpImageCircle.cpp index b0508f9b37..f6b30c3bac 100644 --- a/modules/core/src/image/vpImageCircle.cpp +++ b/modules/core/src/image/vpImageCircle.cpp @@ -32,6 +32,7 @@ */ #include +#include vpImageCircle::vpImageCircle() : m_center() @@ -51,31 +52,10 @@ vpImageCircle::vpImageCircle(const vpImagePoint ¢er, const float &radius) vpImageCircle::vpImageCircle(const cv::Vec3f &vec) : m_center(vec[1], vec[0]) , m_radius(vec[2]) -{ } -#endif - -vpImageCircle::~vpImageCircle() -{ } - -/*! - * \brief Express \b theta between \f$-\pi\f$ and \f$\pi\f$. - * - * \param[in] theta The input angle we want to ensure it is in the interval \f$[-\pi ; \pi]\f$. - * \return The input angle in the interval \f$[-\pi ; \pi]\f$. - */ -float getAngleBetweenMinPiAndPi(const float &theta) { - float theta1 = theta; - float pi = static_cast(M_PI); - if (theta1 > pi) { - theta1 -= 2.0f * pi; - } - else if (theta1 < -pi) { - theta1 += 2.0f * pi; - } - return theta1; } +#endif /*! 
* \brief Compute the length of the angular interval of the circle when it intersects @@ -92,7 +72,7 @@ void computeIntersectionsLeftBorderOnly(const float &u_c, const float &umin_roi, // umin_roi = u_c + r cos(theta) // theta = acos((umin_roi - u_c) / r) float theta1 = std::acos((umin_roi - u_c)/ radius); - theta1 = getAngleBetweenMinPiAndPi(theta1); + theta1 = vpMath::getAngleBetweenMinPiAndPi(theta1); float theta2 = -1.f * theta1; float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); @@ -114,12 +94,11 @@ void computeIntersectionsRightBorderOnly(const float &u_c, const float &umax_roi // u = u_c + r cos(theta) // theta = acos((u - u_c) / r) float theta1 = std::acos((umax_roi - u_c) / radius); - theta1 = getAngleBetweenMinPiAndPi(theta1); + theta1 = vpMath::getAngleBetweenMinPiAndPi(theta1); float theta2 = -1.f * theta1; float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); - float pi = static_cast(M_PI); - delta_theta = 2.f * pi - (theta_max - theta_min); + delta_theta = 2.f * M_PIf - (theta_max - theta_min); } /*! 
@@ -137,25 +116,24 @@ void computeIntersectionsTopBorderOnly(const float &v_c, const float &vmin_roi, // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta1 = std::asin((v_c - vmin_roi) / radius); - float pi = static_cast(M_PI); - theta1 = getAngleBetweenMinPiAndPi(theta1); + theta1 = vpMath::getAngleBetweenMinPiAndPi(theta1); float theta2 = 0.f; if (theta1 >= 0.f) { - theta2 = pi - theta1; + theta2 = M_PIf - theta1; } else { - theta2 = -theta1 - pi; + theta2 = -theta1 - M_PIf; } float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); if (std::abs(theta_max - theta_min) * radius < 1.f) { // Between the maximum and minimum theta there is less than 1 pixel of difference // It meens that the full circle is visible - delta_theta = 2.f * pi; + delta_theta = 2.f * M_PIf; } else if (theta1 > 0.f) { - delta_theta = 2.f * pi - (theta_max - theta_min); + delta_theta = 2.f * M_PIf - (theta_max - theta_min); } else { delta_theta = theta_max - theta_min; @@ -177,28 +155,27 @@ void computeIntersectionsBottomBorderOnly(const float &v_c, const float &vmax_ro // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta1 = std::asin((v_c - vmax_roi) / radius); - float pi = static_cast(M_PI); - theta1 = getAngleBetweenMinPiAndPi(theta1); + theta1 = vpMath::getAngleBetweenMinPiAndPi(theta1); float theta2 = 0.f; if (theta1 >= 0.f) { - theta2 = pi - theta1; + theta2 = M_PIf - theta1; } else { - theta2 = -theta1 - pi; + theta2 = -theta1 - M_PIf; } float theta_min = std::min(theta1, theta2); float theta_max = std::max(theta1, theta2); if (std::abs(theta_max - theta_min) * radius < 1.f) { // Between the maximum and minimum theta there is less than 1 pixel of difference - // It means that the full circle is visible - delta_theta = 2.f * pi; + // It meens that the full circle is visible + delta_theta = 2.f * M_PIf; } else if (theta1 > 0.f) { delta_theta = theta_max - theta_min; } else { - 
delta_theta = 2.f * pi - (theta_max - theta_min); + delta_theta = 2.f * M_PIf - (theta_max - theta_min); } } @@ -225,14 +202,13 @@ void computePerpendicularAxesIntersections(const float &u_c, const float &v_c, c // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - v)/r) float theta_u_cross = std::asin((v_c - crossing_u)/radius); - float pi = static_cast(M_PI); - theta_u_cross = getAngleBetweenMinPiAndPi(theta_u_cross); + theta_u_cross = vpMath::getAngleBetweenMinPiAndPi(theta_u_cross); float theta_u_cross_2 = 0.f; if (theta_u_cross > 0) { - theta_u_cross_2 = pi - theta_u_cross; + theta_u_cross_2 = M_PIf - theta_u_cross; } else { - theta_u_cross_2 = -pi - theta_u_cross; + theta_u_cross_2 = -M_PIf - theta_u_cross; } // Computing the corresponding u-coordinates at which the u-axis is crossed float u_ucross = u_c + radius * std::cos(theta_u_cross); @@ -255,7 +231,7 @@ void computePerpendicularAxesIntersections(const float &u_c, const float &v_c, c // u = u_c + r cos(theta) // theta = acos((u - u_c) / r) float theta_v_cross = std::acos((crossing_v - u_c)/radius); - theta_v_cross = getAngleBetweenMinPiAndPi(theta_v_cross); + theta_v_cross = vpMath::getAngleBetweenMinPiAndPi(theta_v_cross); float theta_v_cross_2 = -theta_v_cross; // Computing the corresponding v-coordinates at which the v-axis is crossed // v = v_c - radius sin(theta) because the v-axis is oriented towards the bottom @@ -352,20 +328,19 @@ void computeIntersectionsTopRight(const float &u_c, const float &v_c, const floa float u_umax = crossing_theta_u_max.second; float v_vmin = crossing_theta_v_min.second; float v_vmax = crossing_theta_v_max.second; - float pi = static_cast(M_PI); if (u_umin <= umax_roi && v_vmin < vmin_roi && u_umax >= umax_roi && v_vmax >= vmin_roi) { // The circle crosses only once each axis and the center is below the top border //Case crossing once delta_theta = theta_v_max - theta_u_min; if (delta_theta < 0) { // The arc cannot be negative - delta_theta += 2.f * 
pi; + delta_theta += 2.f * M_PIf; } } else if (u_umin <= umax_roi && v_vmin >= vmin_roi && u_umax <= umax_roi && v_vmax >= vmin_roi) { // The circle crosses twice each axis //Case crossing twice - delta_theta = 2.f * pi - ((theta_u_min - theta_u_max)+(theta_v_min - theta_v_max)); + delta_theta = 2 * M_PIf - ((theta_u_min - theta_u_max)+(theta_v_min - theta_v_max)); } else if (u_umin >= umax_roi && v_vmin >= vmin_roi && u_umax >= umax_roi && v_vmax >= vmin_roi) { // The circle crosses the u-axis outside the roi @@ -459,20 +434,19 @@ void computeIntersectionsBottomRight(const float &u_c, const float &v_c, const f float u_umax = crossing_theta_u_max.second; float v_vmin = crossing_theta_v_min.second; float v_vmax = crossing_theta_v_max.second; - float pi = static_cast(M_PI); if (u_umin <= umax_roi && u_umax > umax_roi && v_vmin <= vmax_roi && v_vmax > vmax_roi) { // The circle crosses only once each axis //Case crossing once delta_theta = theta_u_min - theta_v_min; if (delta_theta < 0) { // An arc length cannot be negative it means that theta_u_max was comprise in the bottom left quadrant of the circle - delta_theta += 2.f * pi; + delta_theta += 2.f * M_PIf; } } else if (u_umin <= umax_roi && u_umax <= umax_roi && v_vmin <= vmax_roi && v_vmax <= vmax_roi) { // The circle crosses twice each axis //Case crossing twice - delta_theta = 2.f * pi - ((theta_v_min - theta_v_max) + (theta_u_max - theta_u_min)); + delta_theta = 2.f * M_PIf - ((theta_v_min - theta_v_max) + (theta_u_max - theta_u_min)); } else if (u_umin > umax_roi && u_umax > umax_roi && v_vmin <= vmax_roi && v_vmax <= vmax_roi) { // The circle crosses the u-axis outside the roi @@ -528,23 +502,23 @@ void computeIntersectionsTopLeftBottom(const float &u_c, const float &v_c, const float u_umin_bottom = crossing_theta_u_min.second; float u_umax_bottom = crossing_theta_u_max.second; if (u_umin_top >= umin_roi && u_umin_bottom >= umin_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { - // case intersection top + 
left + bottom twice + // case intersection top + left + bottom twice delta_theta = (theta_v_min - theta_u_min_top) + (theta_u_max_top - theta_u_max_bottom) + (theta_u_min_bottom - theta_v_max); } else if (u_umin_top <= umin_roi && v_vmin <= vmin_roi && u_umin_bottom <= umin_roi && v_vmax >= vmax_roi) { - // case intersection top and bottom + // case intersection top and bottom delta_theta = (theta_u_max_top - theta_u_max_bottom); } else if (u_umax_top <= umin_roi && u_umax_bottom <= umin_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { - // case left only + // case left only computeIntersectionsLeftBorderOnly(u_c, umin_roi, radius, delta_theta); } else if (u_umax_bottom > umin_roi && v_vmin >= vmin_roi) { - // case bottom/left corner + // case bottom/left corner computeIntersectionsBottomLeft(u_c, v_c, umin_roi, vmax_roi, radius, delta_theta); } else if (u_umax_top > umin_roi && v_vmax <= vmax_roi) { - // case top/left corner + // case top/left corner computeIntersectionsTopLeft(u_c, v_c, umin_roi, vmin_roi, radius, delta_theta); } } @@ -588,10 +562,9 @@ void computeIntersectionsTopRightBottom(const float &u_c, const float &v_c, cons float theta_u_max_bottom = crossing_theta_u_max.first; float u_umin_bottom = crossing_theta_u_min.second; float u_umax_bottom = crossing_theta_u_max.second; - float pi = static_cast(M_PI); if (u_umax_top <= umax_roi && u_umax_bottom <= umax_roi && v_vmin >= vmin_roi && v_vmax <= vmax_roi) { // case intersection top + right + bottom twice - delta_theta = 2.f * pi - ((theta_u_min_top - theta_u_max_top) + (theta_v_min - theta_v_max) + (theta_u_max_bottom - theta_u_min_bottom)); + delta_theta = 2.f * M_PIf - ((theta_u_min_top - theta_u_max_top) + (theta_v_min - theta_v_max) + (theta_u_max_bottom - theta_u_min_bottom)); } else if (u_umin_top <= umax_roi && u_umax_top > umax_roi && v_vmin <= vmin_roi && u_umin_bottom <= umax_roi && u_umax_bottom > umax_roi && v_vmax >= vmax_roi) { // case intersection top and bottom @@ -629,14 +602,13 @@ 
void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - vmin_roi)/r) float theta_u_cross_top = std::asin((v_c - vmin_roi)/radius); - theta_u_cross_top = getAngleBetweenMinPiAndPi(theta_u_cross_top); + theta_u_cross_top = vpMath::getAngleBetweenMinPiAndPi(theta_u_cross_top); float theta_u_cross_top_2 = 0.f; - float pi = static_cast(M_PI); if (theta_u_cross_top > 0) { - theta_u_cross_top_2 = pi - theta_u_cross_top; + theta_u_cross_top_2 = M_PIf - theta_u_cross_top; } else { - theta_u_cross_top_2 = -pi - theta_u_cross_top; + theta_u_cross_top_2 = -M_PIf - theta_u_cross_top; } // Computing the corresponding u-coordinates at which the u-axis is crossed @@ -657,13 +629,13 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const // v = vc - r sin(theta) because the v-axis goes down // theta = asin((vc - vmax_roi)/r) float theta_u_cross_bottom = std::asin((v_c - vmax_roi)/radius); - theta_u_cross_bottom = getAngleBetweenMinPiAndPi(theta_u_cross_bottom); + theta_u_cross_bottom = vpMath::getAngleBetweenMinPiAndPi(theta_u_cross_bottom); float theta_u_cross_bottom_2 = 0.f; if (theta_u_cross_bottom > 0) { - theta_u_cross_bottom_2 = pi - theta_u_cross_bottom; + theta_u_cross_bottom_2 = M_PIf - theta_u_cross_bottom; } else { - theta_u_cross_bottom_2 = -pi - theta_u_cross_bottom; + theta_u_cross_bottom_2 = -M_PIf - theta_u_cross_bottom; } // Computing the corresponding u-coordinates at which the u-axis is crossed @@ -683,7 +655,7 @@ void computeIntersectionsTopBottomOnly(const float &u_c, const float &v_c, const // Computing the the length of the angular interval of the circle when it intersects // only with the top and bottom borders of the Region of Interest (RoI) - delta_theta = 2.f * pi - ((theta_u_cross_top_min - theta_u_cross_top_max) + (theta_u_cross_bottom_max - theta_u_cross_bottom_min)); + delta_theta = 2.f * M_PIf - ((theta_u_cross_top_min - 
theta_u_cross_top_max) + (theta_u_cross_bottom_max - theta_u_cross_bottom_min)); } /*! @@ -833,7 +805,7 @@ void computeIntersectionsLeftRightOnly(const float &u_c, const float &v_c, const // theta = acos((umin_roi - u_c)/r) // theta_min = -theta_max float theta_v_cross_left = std::acos((umin_roi - u_c)/radius); - theta_v_cross_left = getAngleBetweenMinPiAndPi(theta_v_cross_left); + theta_v_cross_left = vpMath::getAngleBetweenMinPiAndPi(theta_v_cross_left); float theta_v_cross_left_2 = -theta_v_cross_left; // Computing the corresponding v-coordinates at which the v-axis is crossed @@ -855,7 +827,7 @@ void computeIntersectionsLeftRightOnly(const float &u_c, const float &v_c, const // theta = acos((umin_roi - u_c)/r) // theta_min = -theta_max float theta_v_cross_right = std::acos((umax_roi - u_c)/radius); - theta_v_cross_right = getAngleBetweenMinPiAndPi(theta_v_cross_right); + theta_v_cross_right = vpMath::getAngleBetweenMinPiAndPi(theta_v_cross_right); float theta_v_cross_right_2 = -theta_v_cross_right; // Computing the corresponding v-coordinates at which the v-axis is crossed @@ -942,12 +914,11 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const bool touchBottomBorder = (v_c + radius) >= vmax_roi; bool isHorizontallyOK = (!touchLeftBorder && !touchRightBorder); bool isVerticallyOK = (!touchTopBorder && !touchBottomBorder); - float pi = static_cast(M_PI); if (isHorizontallyOK && isVerticallyOK && roi.isInside(m_center)) { // Easy case // The circle has its center in the image and its radius is not too great // to make it fully contained in the RoI - delta_theta = 2.f * pi; + delta_theta = 2.f * M_PIf; } else if (touchBottomBorder && !touchLeftBorder && !touchRightBorder && !touchTopBorder) { // Touches/intersects only the bottom border of the RoI @@ -982,6 +953,7 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const computeIntersectionsTopRight(u_c, v_c, vmin_roi, umax_roi, radius, delta_theta); } else if 
(touchBottomBorder && touchTopBorder && touchLeftBorder && !touchRightBorder) { + std::cout << "DEBUG ici" << std::endl; // Touches/intersects the top, left and bottom borders of the RoI computeIntersectionsTopLeftBottom(u_c, v_c, umin_roi, vmin_roi, vmax_roi, radius, delta_theta); } @@ -1023,8 +995,11 @@ float vpImageCircle::computeAngularCoverageInRoI(const vpRect &roi) const float vpImageCircle::computeArcLengthInRoI(const vpRect &roi) const { float delta_theta = computeAngularCoverageInRoI(roi); - float arcLength = delta_theta * m_radius; - return arcLength; + if (delta_theta < 0) { // Needed since M_PIf is used + delta_theta += 4 * M_PIf; + } + + return delta_theta * m_radius; } vpImagePoint vpImageCircle::getCenter() const diff --git a/modules/core/test/tools/geometry/testImageCircle.cpp b/modules/core/test/tools/geometry/testImageCircle.cpp index b029a3a212..bc4cd432b1 100644 --- a/modules/core/test/tools/geometry/testImageCircle.cpp +++ b/modules/core/test/tools/geometry/testImageCircle.cpp @@ -1,5 +1,4 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. * Copyright (C) 2005 - 2023 by Inria. All rights reserved. * @@ -29,33 +28,21 @@ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Description: - * Test vpRect. - * -*****************************************************************************/ + * Test vpRect and vpImagePoint. 
+ */ #include #include #include +#include #include -bool compareAngles(const float &actualVal, const float &theoreticalVal) +bool equal(const float &actualVal, const float &theoreticalVal) { // Allow up to 1 pixel of difference, due to rounding effects return (std::abs(theoreticalVal - actualVal) < 1.f); } -float ensureIsBetweenMinPiAndPi(const float &theta) -{ - float theta1 = theta; - if (theta1 > M_PI) { - theta1 -= 2.0 * M_PI; - } - else if (theta1 < -M_PI) { - theta1 += 2.0 * M_PI; - } - return theta1; -} - int main() { const float OFFSET = 5.f; @@ -72,8 +59,8 @@ int main() { vpImageCircle circle(vpImagePoint(HEIGHT / 2.f, WIDTH / 2.f), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -94,8 +81,8 @@ int main() vpRect roiSquare(OFFSET, OFFSET, HEIGHT, HEIGHT); vpImageCircle circle(vpImagePoint(OFFSET + HEIGHT / 2.f, OFFSET + HEIGHT / 2.f), HEIGHT / 2.f); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * HEIGHT / 2.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * HEIGHT / 2.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -119,8 +106,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 4.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { 
statusTest = "SUCCESS"; @@ -144,8 +131,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -169,8 +156,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -194,8 +181,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 4.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 4.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -219,8 +206,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -244,8 +231,8 @@ int main() float vc = OFFSET + 100.f; vpImageCircle circle(vpImagePoint(vc, uc), 
RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -265,13 +252,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = M_PI / 3.f; + float theta = M_PIf / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 5.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -291,13 +278,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = -2.f * M_PI/3.f; + float theta = -2.f * M_PIf/3.f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -317,13 +304,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + RADIUS * sin(theta) - float theta = M_PI_2; + float theta = M_PI_2f; float uc = OFFSET + 100.f; float vc = OFFSET + RADIUS * sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float 
theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -343,13 +330,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = -M_PI / 3.f; + float theta = -M_PIf / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 5.f * M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 5.f * M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -369,13 +356,13 @@ int main() { // v = vc - r sin(theta) // Formula: vc = OFFSET + HEIGHT + RADIUS * sin(theta) - float theta = M_PI / 3.f; + float theta = M_PIf / 3.f; float uc = OFFSET + 100.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI * RADIUS /3.f; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PIf * RADIUS /3.f; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -398,8 +385,8 @@ int main() float vc = OFFSET + HEIGHT - RADIUS; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 2.f * M_PI * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 2.f * M_PIf * RADIUS; + bool isValueOK = equal(arcLengthCircle, 
theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -425,8 +412,8 @@ int main() float vc = OFFSET; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI_2 * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PI_2f * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -451,14 +438,14 @@ int main() // (4): umin = uc + r cos(theta_v_max) ; v_cross_max = vc - r sin(theta_v_max) >= vmin && <= vmin + height // (3) & (4) => uc = umin - r cos(theta_v_min) = umin - r cos(theta_v_max) <=> theta_v_min = - theta_v_max // (3) & (4) => vc >= vmin + r sin(theta_v_min) && vc >= vmin + r sin (theta_v_max) - float theta_v_min = M_PI / 4.f; + float theta_v_min = M_PIf / 4.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + RADIUS * std::sin(theta_v_min) + 1.f; vc = std::max(vc, OFFSET + RADIUS * std::sin(-theta_v_min) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = M_PI_2 * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PI_2f * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -485,14 +472,14 @@ int main() // (1) => uc + r cos(theta_u_top_min) >= umin <=> uc >= umin - r cos(theta_u_top_min) // (2) => uc + r cos(theta_u_top_max) >= umin <=> uc >= umin - r cos(theta_u_top_max) - float theta_u_top_min = -1.1f * M_PI_2; + float theta_u_top_min = -1.1f * M_PI_2f; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos(M_PIf - 
theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = 0.2f * M_PI_2 * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = 0.2f * M_PI_2f * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -521,13 +508,13 @@ int main() // (3) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (3) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = M_PIf - theta_u_top_min; float uc = OFFSET - RADIUS * std::cos(theta_u_top_min) + 1.f; - uc = std::max(uc, OFFSET - RADIUS * std::cos((float)M_PI - theta_u_top_min) + 1.f); + uc = std::max(uc, OFFSET - RADIUS * std::cos(M_PIf - theta_u_top_min) + 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET - uc)/RADIUS); - theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); + theta_v_min = vpMath::getAngleBetweenMinPiAndPi(theta_v_min); float theta_v_max = -theta_v_min; if (theta_v_max < 0) { float temp = theta_v_max; @@ -537,7 +524,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = ((theta_v_max - theta_u_top_min) + (theta_u_top_max - theta_v_min)) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -564,14 +551,14 @@ int main() // (1) => vc = vmin + r sin(theta_u_top_min) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) 
& (4) theta_v_min = - theta_v_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_v_max = -M_PI_2; + float theta_u_top_min = 2.f * M_PIf / 3.f; + float theta_v_max = -M_PI_2f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PI_2f + M_PIf / 3.f) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -599,14 +586,14 @@ int main() // (1) <=> asin((vc - vmin)/r) >= acos[(umin + width - uc)/r] <=> vc >= r sin(acos[(umin + width - uc)/r]) + vmin // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_max = -7.f * M_PI / 8.f; + float theta_v_max = -7.f * M_PIf / 8.f; float theta_v_min = -theta_v_max; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_max); float vc = RADIUS * std::sin(std::acos((OFFSET + WIDTH - uc)/RADIUS)) + OFFSET + 1.f; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_v_min - theta_v_max)) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -634,16 +621,16 @@ int main() // Choice: theta_u_top_min = -0.9 * PI / 2 // (1) => vc = vmin + r sin(theta_u_top_min) // (2) vc - r sin(theta_v_min) <= vmin => asin((vc - vmin)/r) <= theta_v_min - float theta_u_top_min = -0.9f * M_PI_2; - float theta_u_top_max = M_PI - theta_u_top_min; - 
theta_u_top_max = ensureIsBetweenMinPiAndPi(theta_u_top_max); + float theta_u_top_min = -0.9f * M_PI_2f; + float theta_u_top_max = M_PIf - theta_u_top_min; + theta_u_top_max = vpMath::getAngleBetweenMinPiAndPi(theta_u_top_max); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::asin((vc - OFFSET)/RADIUS) + 1.f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = std::abs(theta_u_top_min - theta_u_top_max) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -672,13 +659,13 @@ int main() // (2) & (4) =>{ uc = umin - r cos(theta_v_min) & { uc = umin - r cos(- theta_v_min) // (2) & (4) { vc >= vmin - r sin(theta_v_min) & { vc >= vmin - r cos(- theta_v_min) - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = M_PIf - theta_u_top_min; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_u_top_min) - 1.f; - uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI - theta_u_top_min) - 1.f); + uc = std::min(uc, OFFSET + WIDTH - RADIUS * std::cos(M_PIf - theta_u_top_min) - 1.f); float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); float theta_v_min = std::acos((OFFSET + WIDTH - uc)/RADIUS); - theta_v_min = ensureIsBetweenMinPiAndPi(theta_v_min); + theta_v_min = vpMath::getAngleBetweenMinPiAndPi(theta_v_min); float theta_v_max = -theta_v_min; if (theta_v_min < 0) { float temp = theta_v_min; @@ -687,8 +674,8 @@ int main() } vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - 
theta_v_max))) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max))) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -715,14 +702,14 @@ int main() // (3) => vc = vmin + height + r sin(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = M_PI_2; - float theta_u_bot_max = -M_PI / 3.f; + float theta_v_min = M_PI_2f; + float theta_u_bot_max = -M_PIf / 3.f; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_max);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PI_2f + M_PIf / 3.f) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -751,14 +738,14 @@ int main() // (4) => vc <= vmin + height + r sin(theta_v_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = M_PI_4 / 2.f; + float theta_v_min = M_PI_4f / 2.f; float theta_v_max = -theta_v_min; float uc = OFFSET - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(theta_v_max) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (2.f * theta_v_min) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if 
(isValueOK) { statusTest = "SUCCESS"; @@ -787,14 +774,14 @@ int main() // (1) => uc >= umin - r cos(theta_u_bot_max) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 5.f * M_PI_4 / 2.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; + float theta_u_bot_min = 5.f * M_PI_4f / 2.f; + float theta_u_bot_max = M_PIf - theta_u_bot_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = std::max(OFFSET - RADIUS * std::cos(theta_u_bot_min) + 1.f, OFFSET - RADIUS * std::cos(theta_u_bot_max) + 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_u_bot_min - theta_u_bot_max) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -823,17 +810,17 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -5.f * M_PI / 8.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; - theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_min = 7.f * M_PI / 8.f; + float theta_u_bot_min = -5.f * M_PIf / 8.f; + float theta_u_bot_max = M_PIf - theta_u_bot_min; + theta_u_bot_max = vpMath::getAngleBetweenMinPiAndPi(theta_u_bot_max); + float theta_v_min = 7.f * M_PIf / 8.f; float theta_v_max = -theta_v_min; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = ((theta_v_min - theta_u_bot_max) + (theta_u_bot_min - theta_v_max)) * RADIUS; - bool isValueOK = 
compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -860,14 +847,14 @@ int main() // (1) => vc = vmin + height + r sin(theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -2.f * M_PI / 3.f; - float theta_v_min = M_PI_2; + float theta_u_bot_min = -2.f * M_PIf / 3.f; + float theta_v_min = M_PI_2f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min);; vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI_2 + M_PI / 3.f) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PI_2f + M_PIf / 3.f) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -895,13 +882,13 @@ int main() // (2) & (4) => vc <= vmin + height + r sin(theta_v_min) & vc <= vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_v_min = 5.f * M_PI / 6.f; + float theta_v_min = 5.f * M_PIf / 6.f; float uc = OFFSET + WIDTH - RADIUS * std::cos(theta_v_min); float vc = std::min(OFFSET + HEIGHT + RADIUS * std::sin(theta_v_min) - 1.f, OFFSET + HEIGHT + RADIUS * std::sin(-theta_v_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PIf / 3.f) * RADIUS; // <=> 2.f * M_PIf / 6.f + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { 
statusTest = "SUCCESS"; @@ -929,13 +916,13 @@ int main() // (1) & (3) => uc < umin + width - r cos(theta_u_bot_min) & uc <= umin + width - r cos(PI - theta_u_bot_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = 4.f * M_PI / 6.f; + float theta_u_bot_min = 4.f * M_PIf / 6.f; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); - float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos((float)M_PI -theta_u_bot_min) - 1.f); + float uc = std::min(OFFSET + WIDTH - RADIUS * std::cos(theta_u_bot_min) - 1.f, OFFSET + WIDTH - RADIUS * std::cos(M_PIf -theta_u_bot_min) - 1.f); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (M_PI / 3.f) * RADIUS; // <=> 2.f * M_PI / 6.f - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PIf / 3.f) * RADIUS; // <=> 2.f * M_PIf / 6.f + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -964,17 +951,17 @@ int main() // (2) & (4) => vc < vmin + height + r sin(theta_v_min) & vc < vmin + height + r sin(-theta_v_min) // (1) & (3) theta_u_bot_min = PI - theta_u_bot_max // (2) & (4) theta_v_min = - theta_v_max - float theta_u_bot_min = -7.f * M_PI / 8.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; - theta_u_bot_max = ensureIsBetweenMinPiAndPi(theta_u_bot_max); - float theta_v_max = -3.f * M_PI / 8.f; + float theta_u_bot_min = -7.f * M_PIf / 8.f; + float theta_u_bot_max = M_PIf - theta_u_bot_min; + theta_u_bot_max = vpMath::getAngleBetweenMinPiAndPi(theta_u_bot_max); + float theta_v_max = -3.f * M_PIf / 8.f; float theta_v_min = -theta_v_max; float vc = OFFSET + HEIGHT + RADIUS * std::sin(theta_u_bot_min); float uc = OFFSET - RADIUS * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, 
uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_v_min - theta_v_max) + (theta_u_bot_max - theta_u_bot_min))) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1002,19 +989,19 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = 3.f * M_PI / 8.f; - float theta_v_min = 7.f * M_PI / 8.f; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = 3.f * M_PIf / 8.f; + float theta_v_min = 7.f * M_PIf / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * M_PI / 8.f; - float theta_u_bottom_max = -3.f * M_PI / 8.f; + float theta_u_bottom_min = -5.f * M_PIf / 8.f; + float theta_u_bottom_max = -3.f * M_PIf / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = ((theta_v_min - theta_u_top_min) + (theta_u_top_max - theta_u_bottom_max) + (theta_u_bottom_min - theta_v_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1042,9 +1029,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = M_PI / 6.f; 
- float theta_u_top_min = M_PI - theta_u_top_max; - float theta_v_min = M_PI / 3.f; + float theta_u_top_max = M_PIf / 6.f; + float theta_u_top_min = M_PIf - theta_u_top_max; + float theta_v_min = M_PIf / 3.f; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1052,7 +1039,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_u_top_max - theta_u_bottom_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1080,9 +1067,9 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * M_PI / 6.f; - float theta_u_top_max = M_PI - theta_u_top_min; - float theta_v_min = M_PI; + float theta_u_top_min = 4.f * M_PIf / 6.f; + float theta_u_top_max = M_PIf - theta_u_top_min; + float theta_v_min = M_PIf; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; float radius = HEIGHT / (2.f * std::sin(theta_u_top_min)); // vmin + h - vmin = (vc - r sin(-theta_u_top_min)) - (vc - r sin(theta_top_min)) @@ -1090,8 +1077,8 @@ int main() float uc = OFFSET - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + bool isValueOK = equal(arcLengthCircle, 
theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1119,8 +1106,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = M_PI_4; + float theta_u_top_min = M_PI_2f; + float theta_v_min = M_PI_4f; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1128,7 +1115,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_v_min - theta_v_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1156,8 +1143,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = 3.f * M_PI_4; + float theta_u_top_min = M_PI_2f; + float theta_v_min = 3.f * M_PI_4f; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); @@ -1165,7 +1152,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_v_min - theta_v_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1194,15 +1181,15 @@ int main() // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max float theta_u_top_max = 0.f; - float theta_u_bot_max = -M_PI / 3.f; - float theta_v_max = -M_PI / 
6.f; + float theta_u_bot_max = -M_PIf / 3.f; + float theta_v_max = -M_PIf / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_max); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_u_top_max - theta_v_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1230,16 +1217,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_max = M_PI / 3.f; + float theta_u_top_max = M_PIf / 3.f; float theta_u_bot_max = 0.f; - float theta_v_min = M_PI / 6.f; + float theta_v_min = M_PIf / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_max) - std::sin(theta_u_bot_max)); float uc = OFFSET - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_max); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_v_min - theta_u_bot_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1267,19 +1254,19 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = 3.f * M_PI / 8.f; - float theta_v_min = 1.f * M_PI / 8.f; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = 3.f * M_PIf / 8.f; + float theta_v_min = 1.f * 
M_PIf / 8.f; float theta_v_max = -theta_v_min; - float theta_u_bottom_min = -5.f * M_PI / 8.f; - float theta_u_bottom_max = -3.f * M_PI / 8.f; + float theta_u_bottom_min = -5.f * M_PIf / 8.f; + float theta_u_bottom_max = -3.f * M_PIf / 8.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_v_min - theta_v_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1307,16 +1294,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 5.f * M_PI / 6.f; - float theta_v_min = 2.f * M_PI / 3.f; + float theta_u_top_min = 5.f * M_PIf / 6.f; + float theta_v_min = 2.f * M_PIf / 3.f; float theta_u_bottom_min = -theta_u_top_min; float radius = HEIGHT; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_u_bottom_min)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_u_top_min - theta_u_bottom_min)) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); 
std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1344,8 +1331,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 4.f * M_PI / 6.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 4.f * M_PIf / 6.f; + float theta_u_top_max = M_PIf - theta_u_top_min; float theta_v_min = 0; float theta_u_bottom_min = -theta_u_top_min; float theta_u_bottom_max = -theta_u_top_max; @@ -1354,8 +1341,8 @@ int main() float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1383,16 +1370,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = 3.f * M_PI_4; + float theta_u_top_min = M_PI_2f; + float theta_v_min = 3.f * M_PI_4f; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float 
theoreticalValue = (2.f * M_PIf - (theta_v_min - theta_v_max)) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1420,16 +1407,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI_2; - float theta_v_min = M_PI_4; + float theta_u_top_min = M_PI_2f; + float theta_v_min = M_PI_4f; float theta_v_max = -theta_v_min; float radius = HEIGHT / 2.f; float vc = OFFSET + radius * std::sin(theta_u_top_min); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_v_min - theta_v_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_v_min - theta_v_max)) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1457,16 +1444,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = M_PI; - float theta_u_bot_min = -2.f * M_PI / 3.f; - float theta_v_max = -5.f * M_PI / 6.f; + float theta_u_top_min = M_PIf; + float theta_u_bot_min = -2.f * M_PIf / 3.f; + float theta_v_max = -5.f * M_PIf / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - (theta_u_top_min - theta_v_max)) * radius; - bool isValueOK = 
compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - (theta_u_top_min - theta_v_max)) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1494,16 +1481,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_min = - theta_v_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_u_bot_min = M_PI; - float theta_v_min = 5.f * M_PI / 6.f; + float theta_u_top_min = 2.f * M_PIf / 3.f; + float theta_u_bot_min = M_PIf; + float theta_v_min = 5.f * M_PIf / 6.f; float radius = HEIGHT / (std::sin(theta_u_top_min) - std::sin(theta_u_bot_min)); float uc = OFFSET + WIDTH - radius * std::cos(theta_v_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); float theoreticalValue = (theta_u_bot_min - theta_v_min) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1533,19 +1520,19 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * M_PIf / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 8.f; + float theta_v_right_min = M_PIf / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_top_min = 5.f * M_PI / 8.f; - float theta_u_top_max = M_PI - theta_u_top_min; + float theta_u_top_min = 5.f * M_PIf / 8.f; + float theta_u_top_max = M_PIf - theta_u_top_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * 
std::cos(theta_v_right_min); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_u_top_min) + (theta_u_top_max - theta_v_right_min) + (theta_v_right_max - theta_v_left_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1574,13 +1561,13 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * M_PI / 3.f; + float theta_u_top_min = -2.f * M_PIf / 3.f; float uc = OFFSET + WIDTH_SWITCHED/2.f; float vc = OFFSET + RADIUS * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), RADIUS); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); - float theoreticalValue = (M_PI/3.f) * RADIUS; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (M_PIf/3.f) * RADIUS; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1610,16 +1597,16 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_max = -5.f * M_PI / 8.f; - float theta_v_right_max = -3.f *M_PI / 8.f; - float theta_u_top_min = -7.f * M_PI / 8.f; + float theta_v_left_max = -5.f * M_PIf / 8.f; + float theta_v_right_max = -3.f *M_PIf / 8.f; + float theta_u_top_min = -7.f * M_PIf / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET - radius * std::cos(theta_v_left_max); float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = 
circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = (theta_v_right_max - theta_v_left_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1649,16 +1636,16 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_max = -M_PI / 3.f; + float theta_u_top_max = -M_PIf / 3.f; float theta_v_right_max = 0.f; - float theta_v_left_max = -M_PI_2; + float theta_v_left_max = -M_PI_2f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET; float vc = OFFSET + radius * std::sin(theta_u_top_max); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = (theta_u_top_max - theta_v_left_max) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1688,16 +1675,16 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_top_min = -2.f * M_PI / 3.f; - float theta_v_left_max = M_PI; - float theta_v_right_max = -M_PI_2; + float theta_u_top_min = -2.f * M_PIf / 3.f; + float theta_v_left_max = M_PIf; + float theta_v_right_max = -M_PI_2f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_max) - std::cos(theta_v_left_max)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + radius * std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = (theta_v_right_max - theta_u_top_min) * radius; - bool isValueOK = compareAngles(arcLengthCircle, 
theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1727,19 +1714,19 @@ int main() // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * M_PIf / 8.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 8.f; + float theta_v_right_min = M_PIf / 8.f; float theta_v_right_max = -theta_v_right_min; - float theta_u_bot_min = -5.f * M_PI / 8.f; - float theta_u_bot_max = -M_PI - theta_u_bot_min; + float theta_u_bot_min = -5.f * M_PIf / 8.f; + float theta_u_bot_max = -M_PIf - theta_u_bot_min; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_v_right_min) + (theta_v_right_max - theta_u_bot_max) + (theta_u_bot_min - theta_v_left_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1768,17 +1755,17 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * M_PI / 3.f; - float theta_u_bot_max = M_PI - theta_u_bot_min; - float theta_v_left_min = 5.f * M_PI / 6.f; - float theta_v_right_min = M_PI / 6.f; + float theta_u_bot_min = 2.f * M_PIf / 3.f; + float theta_u_bot_max = M_PIf - theta_u_bot_min; + float theta_v_left_min = 5.f * M_PIf / 6.f; + float theta_v_right_min = M_PIf / 
6.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_u_bot_min - theta_u_bot_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1807,16 +1794,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 7.f * M_PI / 8.f; - float theta_v_left_min = 5.f * M_PI / 8.f; - float theta_v_right_min = 3.f * M_PI / 8.f; + float theta_u_bot_min = 7.f * M_PIf / 8.f; + float theta_v_left_min = 5.f * M_PIf / 8.f; + float theta_v_right_min = 3.f * M_PIf / 8.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED - radius * std::cos(theta_v_right_min); float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_v_right_min)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1845,8 +1832,8 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_max = M_PI / 3.f; - float theta_v_left_min = M_PI_2; + float 
theta_u_bot_max = M_PIf / 3.f; + float theta_v_left_min = M_PI_2f; float theta_v_right_min = 0.f; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET; @@ -1854,7 +1841,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_u_bot_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1883,16 +1870,16 @@ int main() // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_u_bot_min = 2.f * M_PI / 3.f; - float theta_v_right_min = M_PI_2; - float theta_v_left_min = M_PI; + float theta_u_bot_min = 2.f * M_PIf / 3.f; + float theta_v_right_min = M_PI_2f; + float theta_v_left_min = M_PIf; float radius = WIDTH_SWITCHED / (std::cos(theta_v_right_min) - std::cos(theta_v_left_min)); float uc = OFFSET + WIDTH_SWITCHED; float vc = OFFSET + HEIGHT_SWITCHED + radius * std::sin(theta_u_bot_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_u_bot_min - theta_v_right_min)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1917,18 +1904,18 @@ int main() // (6): u_cross_bot_max = uc + r cos(theta_u_bottom_max) <= umin_roi + width ; vmin_roi + height = vc - r sin(theta_u_bottom_max) // (1) & (3) theta_u_top_min = PI - theta_u_top_max // (5) & (6) theta_u_bottom_min = PI - theta_u_bottom_max - float theta_u_top_min = 2.f * M_PI / 3.f; - float theta_u_top_max = M_PI / 3.f; - 
float theta_u_bottom_min = -2.f * M_PI / 3.f; - float theta_u_bottom_max = -M_PI / 3.f; + float theta_u_top_min = 2.f * M_PIf / 3.f; + float theta_u_top_max = M_PIf / 3.f; + float theta_u_bottom_min = -2.f * M_PIf / 3.f; + float theta_u_bottom_max = -M_PIf / 3.f; float uc = OFFSET + WIDTH / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = -(OFFSET - vc)/ std::sin(theta_u_top_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(roi); - float theoreticalValue = (2.f * M_PI - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = (2.f * M_PIf - ((theta_u_top_min - theta_u_top_max) + (theta_u_bottom_max - theta_u_bottom_min))) * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -1953,9 +1940,9 @@ int main() // (6): u_min + width = uc + r cos(theta_v_right_max); v_cross_right_max = vc - r sin(theta_v_right_max) // (2) & (4) theta_v_left_min = - theta_v_left_max // (5) & (6) theta_v_right_min = - theta_v_right_max - float theta_v_left_min = 5.f * M_PI / 6.f; + float theta_v_left_min = 5.f * M_PIf / 6.f; float theta_v_left_max = -theta_v_left_min; - float theta_v_right_min = M_PI / 6.f; + float theta_v_right_min = M_PIf / 6.f; float theta_v_right_max = -theta_v_right_min; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + WIDTH / 2.f; @@ -1964,7 +1951,7 @@ int main() vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(switchedRoI); float theoreticalValue = ((theta_v_left_min - theta_v_right_min) + (theta_v_right_max - theta_v_left_max)) * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; 
@@ -1985,15 +1972,15 @@ int main() // Choosing theta_v_left_min = 7 PI / 8 and circle at the center of the RoI // umin = uc + r cos(theta_v_left_min) => r = (umin - uc) / cos(theta_v_left_min) vpRect squareRoI(OFFSET, OFFSET, HEIGHT, HEIGHT); - float theta_v_left_min = 7.f * M_PI / 8.f; + float theta_v_left_min = 7.f * M_PIf / 8.f; float uc = OFFSET + HEIGHT / 2.f; float vc = OFFSET + HEIGHT / 2.f; float radius = (OFFSET - uc) / std::cos(theta_v_left_min); vpImageCircle circle(vpImagePoint(vc, uc), radius); float arcLengthCircle = circle.computeArcLengthInRoI(squareRoI); - float theoreticalValue = M_PI * radius; - bool isValueOK = compareAngles(arcLengthCircle, theoreticalValue); + float theoreticalValue = M_PIf * radius; + bool isValueOK = equal(arcLengthCircle, theoreticalValue); std::string statusTest; if (isValueOK) { statusTest = "SUCCESS"; @@ -2010,9 +1997,9 @@ int main() } if (hasSucceeded) { - std::cout << "testImageCircle overall result: SUCCESS"; + std::cout << "testImageCircle overall result: SUCCESS" << std::endl; return EXIT_SUCCESS; } - std::cout << "testImageCircle overall result: FAILED"; + std::cout << "testImageCircle overall result: FAILED" << std::endl; return EXIT_FAILURE; } From 4c26fd7466b354241d25c938522d5fee11f1264b Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 13:53:19 +0200 Subject: [PATCH 09/14] Add missing vpMath.h include --- modules/core/src/camera/vpCameraParameters.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/core/src/camera/vpCameraParameters.cpp b/modules/core/src/camera/vpCameraParameters.cpp index 8993f054d2..9abf1488f2 100644 --- a/modules/core/src/camera/vpCameraParameters.cpp +++ b/modules/core/src/camera/vpCameraParameters.cpp @@ -46,6 +46,7 @@ #include #include #include +#include const double vpCameraParameters::DEFAULT_PX_PARAMETER = 600.0; const double vpCameraParameters::DEFAULT_PY_PARAMETER = 600.0; From 9730575f2341ed75176a2b47ce822f67de1c7d89 Mon Sep 17 00:00:00 2001 From: 
Fabien Spindler Date: Fri, 27 Oct 2023 14:11:43 +0200 Subject: [PATCH 10/14] Fix wrong changes in vpMath.h --- modules/core/include/visp3/core/vpMath.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/modules/core/include/visp3/core/vpMath.h b/modules/core/include/visp3/core/vpMath.h index 2b56d70061..db8bf2e9c7 100644 --- a/modules/core/include/visp3/core/vpMath.h +++ b/modules/core/include/visp3/core/vpMath.h @@ -77,8 +77,6 @@ #endif -#endif - #ifndef M_PIf #define M_PIf 3.14159265358979323846f #endif @@ -89,6 +87,7 @@ #ifndef M_PI_4f #define M_PI_4f (M_PIf / 4.0f) +#endif #include #include @@ -194,9 +193,9 @@ class VISP_EXPORT vpMath } return (v < lower) ? lower : (upper < v) ? upper : v; #endif - } + } - // round x to the nearest integer + // round x to the nearest integer static inline int round(double x); // return the sign of x (+-1) @@ -331,14 +330,14 @@ class VISP_EXPORT vpMath private: static const double ang_min_sinc; static const double ang_min_mc; -}; + }; -// Begining of the inline functions definition + // Begining of the inline functions definition -/*! - Computes and returns x! - \param x : parameter of factorial function. -*/ + /*! + Computes and returns x! + \param x : parameter of factorial function. + */ double vpMath::fact(unsigned int x) { if ((x == 1) || (x == 0)) From ee4622376dc61060cb69095ea153997659c4cd98 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:19:34 +0200 Subject: [PATCH 11/14] Revert changes to re-introduce std::ostream &operator<<(...) 
in vpPoint.cpp - fix for visp_java build --- modules/core/src/tracking/forward-projection/vpPoint.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/core/src/tracking/forward-projection/vpPoint.cpp b/modules/core/src/tracking/forward-projection/vpPoint.cpp index efbec9be21..50e439619b 100644 --- a/modules/core/src/tracking/forward-projection/vpPoint.cpp +++ b/modules/core/src/tracking/forward-projection/vpPoint.cpp @@ -406,6 +406,8 @@ void vpPoint::display(const vpImage &I, const vpHomogeneousMatrix &cMo, vpFeatureDisplay::displayPoint(_p[0], _p[1], cam, I, color, thickness); } +VISP_EXPORT std::ostream &operator<<(std::ostream &os, const vpPoint & /* vpp */) { return (os << "vpPoint"); } + /*! * Display the projection of a 3D point in image \e I. * From e05808391c5667a4e0e8b780ec80387894738d72 Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:22:04 +0200 Subject: [PATCH 12/14] Update java generator to avoid warning around removal finalize() make visp_java produced the following warning: [removal] finalize() in Object has been deprecated and marked for removal --- modules/java/generator/gen_java.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index 907655c6d5..d7e980ac1f 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -1145,13 +1145,15 @@ def gen_class(self, ci): if ci.name != 'VpImgproc' and ci.name != self.Module or ci.base: # finalize() - ci.j_code.write( - """ - @Override - protected void finalize() throws Throwable { - delete(nativeObj); - } - """) + # Note 2023.10.27 warning: [removal] finalize() in Object has been deprecated and marked for removal + # Comment for now +# ci.j_code.write( +# """ +# @Override +# protected void finalize() throws Throwable { +# delete(nativeObj); +# } +# """) ci.jn_code.write( """ From 26bba30c66b88584739ae1e1032999a429da0857 Mon Sep 17 
00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:24:01 +0200 Subject: [PATCH 13/14] Update java installation tutorial for macos --- doc/tutorial/java/tutorial-install-java.dox | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/doc/tutorial/java/tutorial-install-java.dox b/doc/tutorial/java/tutorial-install-java.dox index 23e29d1583..65ba0a7018 100644 --- a/doc/tutorial/java/tutorial-install-java.dox +++ b/doc/tutorial/java/tutorial-install-java.dox @@ -40,7 +40,10 @@ java 17.0.2 2022-01-18 LTS \subsection java_install_jdk_osx On Mac OSX platform -\warning On macOS Catalina 10.15.7 with Xcode 12.4, the installation of JDK 11 or 15 from the [Oracle](http://www.oracle.com/technetwork/java/javase/downloads/index.html) website by downloading and installing `jdk-11.0.10_osx-x64_bin.dmg` or `jdk-15.0.2_osx-x64_bin.dmg` doesn't allow the detection of JNI necessary to build `visp_java.jar`. That's why we recommend to install JDK using brew. +\warning On macOS Catalina 10.15.7 with Xcode 12.4, the installation of JDK 11 or 15 from the +[Oracle](http://www.oracle.com/technetwork/java/javase/downloads/index.html) website by downloading and installing +`jdk-11.0.10_osx-x64_bin.dmg` or `jdk-15.0.2_osx-x64_bin.dmg` doesn't allow the detection of JNI necessary to build +`visp_java.jar`. That's why we recommend to install JDK using brew. \verbatim $ more ViSP-third-party.txt ... @@ -71,13 +74,20 @@ openjdk: stable 17.0.1 (bottled) [keg-only] - Now for the system Java wrappers to find this JDK, symlink it with: \verbatim -$ sudo ln -sfn /usr/local/opt/openjdk/libexec/openjdk.jdk /Library/Java/JavaVirtualMachines/openjdk.jdk +$ sudo ln -sfn /opt/homebrew/opt/openjdk/libexec/openjdk.jdk /Library/Java/JavaVirtualMachines/openjdk.jdk +\endverbatim +\note Depending on the OpenJDK version, the symlink instruction can differ. To know which one is to use, follow +instructions provided by `brew info openjdk`. 
At the time this tutorial was updated, we got: +\verbatim +$ brew info openjdk +For the system Java wrappers to find this JDK, symlink it with + sudo ln -sfn /opt/homebrew/opt/openjdk/libexec/openjdk.jdk /Library/Java/JavaVirtualMachines/openjdk.jdk \endverbatim - Set `JAVA_HOME` env var to help JNI headers and libraries detection \verbatim -$ echo 'export JAVA_HOME=$(/usr/libexec/java_home)' >> ~/.bashrc -$ source ~/.bashrc +$ echo 'export JAVA_HOME=$(/usr/libexec/java_home)' >> ~/.zshrc +$ source ~/.zshrc \endverbatim - After installation check JDK version: From 44ad9cf923d5b9f7190bcc057313677f9b8a812b Mon Sep 17 00:00:00 2001 From: Fabien Spindler Date: Fri, 27 Oct 2023 16:24:35 +0200 Subject: [PATCH 14/14] Fix typo --- modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h index 6bb6556938..0cf2f5a162 100644 --- a/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h +++ b/modules/tracker/mbt/include/visp3/mbt/vpMbKltTracker.h @@ -205,7 +205,7 @@ class VISP_EXPORT vpMbKltTracker : public virtual vpMbTracker { protected: -//! Temporary OpenCV image for fast conversion. + //! Temporary OpenCV image for fast conversion. cv::Mat cur; //! Initial pose. vpHomogeneousMatrix c0Mo;