diff --git a/README b/README
index 60a5efb..455f14a 100644
--- a/README
+++ b/README
@@ -1,3 +1,46 @@
 See [ROS wiki](http://wiki.ros.org/ar_track_alvar) for the users document.
+### Testing (Build from Source)
+The test suite for this package plays back a ROS 1 bag and therefore depends on the [rosbag2_bag_v2](https://github.com/ros2/rosbag2_bag_v2) plugin. For the tests to work,
+you need to source both the ROS 1 underlay and the ROS 2 underlay, e.g. in your ~/.bashrc file, and then open a new terminal.
+
+```
+# ROS1
+source /opt/ros/noetic/setup.bash
+
+# ROS2
+source /opt/ros/foxy/setup.bash
+source /usr/share/colcon_cd/function/colcon_cd.sh
+export _colcon_cd_root=/opt/ros/foxy
+
+```
+
+### Deprecation Warning
+
+As far as we can tell, the vendor package for Alvar is no longer actively maintained.
+The underlying OpenCV implementation has been upgraded, but it is unclear how worthwhile it is to keep maintaining this package.
+Users should consider the tag tracking [libraries](https://docs.opencv.org/4.2.0/d5/dae/tutorial_aruco_detection.html) natively supported by OpenCV, as they handle a greater variety of tags and provide similar functionality.
+
+### Miscellaneous
+
+Building this package requires building [perception_pcl](https://github.com/ros-perception/perception_pcl/tree/foxy-devel) from source.
+
+In your workspace, perform the following:
+
+```
+git clone https://github.com/ros-perception/perception_pcl/ -b foxy-devel
+```
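+
+Then build the workspace. A typical sequence is sketched below (a sketch only: it assumes the clone above landed in your workspace's `src/` directory; adjust paths to your layout):
+
+```
+rosdep install --from-paths src --ignore-src -y
+colcon build --symlink-install --packages-up-to ar_track_alvar
+```
+
+With both underlays sourced, the rosbag2_bag_v2 plugin also lets `ros2 bag play` read a ROS 1 bag directly, which is a quick way to sanity-check the test setup (the bag path below is a placeholder):
+
+```
+ros2 bag play -s rosbag_v2 path/to/ar_track_alvar_4markers_tork.bag
+```
diff --git a/ar_track_alvar/CMakeLists.txt b/ar_track_alvar/CMakeLists.txt
index 7450ac6..88da167 100644
--- a/ar_track_alvar/CMakeLists.txt
+++ b/ar_track_alvar/CMakeLists.txt
@@ -1,183 +1,186 @@
-cmake_minimum_required(VERSION 2.8.3)
+cmake_minimum_required(VERSION 3.5)
 project(ar_track_alvar)
-set(MSG_DEPS
-  ar_track_alvar_msgs
-  std_msgs
-  sensor_msgs
-  geometry_msgs
-  visualization_msgs)
-
-find_package(catkin COMPONENTS
-  genmsg
-  roscpp
-  tf
-  tf2
-  image_transport
-  resource_retriever
-  cv_bridge
-  pcl_ros
-  pcl_conversions
-  message_generation
-  ${MSG_DEPS}
-  dynamic_reconfigure
-  cmake_modules
-  REQUIRED)
+find_package(ament_cmake REQUIRED)
+find_package(ament_cmake_auto REQUIRED)
+ament_auto_find_build_dependencies()
-find_package(Eigen3 REQUIRED)
 find_package(OpenCV REQUIRED)
-find_package(TinyXML REQUIRED)
-
-include(CheckCXXCompilerFlag)
-CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
-CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
-if(COMPILER_SUPPORTS_CXX11)
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
-elseif(COMPILER_SUPPORTS_CXX0X)
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
-else()
-  message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. 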
Please use a different C++ compiler.") +if(NOT DEFINED CMAKE_SUPPRESS_DEVELOPER_WARNINGS) + set(CMAKE_SUPPRESS_DEVELOPER_WARNINGS 1 CACHE INTERNAL "No dev warnings") endif() -cmake_policy(SET CMP0046 OLD) - -# dynamic reconfigure support -generate_dynamic_reconfigure_options(cfg/Params.cfg) - -catkin_package( - INCLUDE_DIRS include - LIBRARIES ar_track_alvar - CATKIN_DEPENDS - ar_track_alvar_msgs - std_msgs - roscpp - tf - tf2 - message_runtime - image_transport - sensor_msgs - geometry_msgs - visualization_msgs - resource_retriever - cv_bridge - pcl_ros - pcl_conversions - dynamic_reconfigure +find_package(rclcpp REQUIRED) +find_package(tf2_ros REQUIRED) +find_package(tf2 REQUIRED) +find_package(image_transport REQUIRED) +find_package(resource_retriever REQUIRED) +find_package(cv_bridge REQUIRED) +find_package(perception_pcl REQUIRED) +find_package(pcl_conversions REQUIRED) +find_package(ar_track_alvar_msgs REQUIRED) +find_package(std_msgs REQUIRED) +find_package(sensor_msgs REQUIRED) +find_package(tf2_geometry_msgs REQUIRED) +find_package(geometry_msgs REQUIRED) +find_package(rosbag2_bag_v2_plugins REQUIRED) +find_package(visualization_msgs REQUIRED) +find_package(PCL REQUIRED QUIET COMPONENTS common io) +find_package(PCL REQUIRED) +find_package(Eigen3 REQUIRED) +find_package(tinyxml_vendor REQUIRED) + +include_directories(include) + +# Kinect filtering code +set(KINECT_FILTERING_TARGETS kinect_filtering medianFilter) +set(ALVAR_TARGETS ar_track_alvar individualMarkers individualMarkersNoKinect trainMarkerBundle findMarkerBundles findMarkerBundlesNoKinect createMarker) + +set(dependencies + OpenCV + tf2_ros + tf2 + pcl_conversions + std_msgs + tinyxml_vendor + image_transport + perception_pcl + visualization_msgs + rclcpp + resource_retriever + geometry_msgs + tf2_geometry_msgs + cv_bridge + sensor_msgs + ar_track_alvar_msgs ) -include_directories(include - ${catkin_INCLUDE_DIRS} +include_directories(include ${OpenCV_INCLUDE_DIRS} ${TinyXML_INCLUDE_DIRS} - + ${PCL_COMMON_INCLUDE_DIRS} ) -set(GENCPP_DEPS ar_track_alvar_msgs_gencpp std_msgs_gencpp sensor_msgs_gencpp geometry_msgs_gencpp visualization_msgs_gencpp) - -add_library(ar_track_alvar - src/Camera.cpp - src/CaptureDevice.cpp - src/Pose.cpp - src/Marker.cpp - src/MarkerDetector.cpp +add_library(${PROJECT_NAME} src/Bitset.cpp - src/Rotation.cpp - src/CvTestbed.cpp + src/Camera.cpp src/CaptureDevice.cpp src/CaptureFactory.cpp src/CaptureFactory_unix.cpp - src/FileFormatUtils.cpp - src/Threads.cpp - src/Threads_unix.cpp - src/Mutex.cpp - src/Mutex_unix.cpp src/ConnectedComponents.cpp - src/Line.cpp src/Plugin.cpp - src/Plugin_unix.cpp + src/CvTestbed.cpp src/DirectoryIterator.cpp src/DirectoryIterator_unix.cpp src/Draw.cpp - src/Util.cpp + src/FileFormatUtils.cpp src/Filter.cpp src/Kalman.cpp - src/kinect_filtering.cpp - src/Optimization.cpp + src/Line.cpp + src/Marker.cpp + src/MarkerDetector.cpp src/MultiMarker.cpp src/MultiMarkerBundle.cpp - src/MultiMarkerInitializer.cpp) -target_link_libraries(ar_track_alvar ${OpenCV_LIBS} ${TinyXML_LIBRARIES} ${catkin_LIBRARIES}) -add_dependencies(ar_track_alvar ${GENCPP_DEPS}) - -# Kinect filtering code -set(KINECT_FILTERING_TARGETS kinect_filtering medianFilter) + src/MultiMarkerInitializer.cpp + src/Mutex.cpp + src/Mutex_unix.cpp + src/Optimization.cpp + src/Plugin.cpp + src/Plugin_unix.cpp + src/Pose.cpp + src/Rotation.cpp + src/Threads.cpp + src/Threads_unix.cpp + src/Util.cpp +) + +target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBRARIES} ${PCL_LIBRARIES} tinyxml) 
+ament_target_dependencies(ar_track_alvar ${dependencies}) add_library(kinect_filtering src/kinect_filtering.cpp) -target_link_libraries(kinect_filtering ${catkin_LIBRARIES}) -add_dependencies(kinect_filtering ${GENCPP_DEPS}) +ament_target_dependencies(kinect_filtering ${dependencies}) -add_library(medianFilter src/medianFilter.cpp) -target_link_libraries(medianFilter ar_track_alvar ${catkin_LIBRARIES}) -add_dependencies(medianFilter ${GENCPP_DEPS}) -set(ALVAR_TARGETS ar_track_alvar individualMarkers individualMarkersNoKinect trainMarkerBundle findMarkerBundles findMarkerBundlesNoKinect createMarker ar_track_alvar) +add_library(medianFilter src/medianFilter.cpp) +target_link_libraries(medianFilter ar_track_alvar ${PCL_LIBRARIES} ${TinyXML_LIBRARIES}) +ament_target_dependencies(medianFilter ${dependencies}) add_executable(individualMarkers nodes/IndividualMarkers.cpp) -target_link_libraries(individualMarkers ar_track_alvar kinect_filtering ${catkin_LIBRARIES}) -add_dependencies(individualMarkers ${PROJECT_NAME}_gencpp ${GENCPP_DEPS} ${PROJECT_NAME}_gencfg) +target_link_libraries(individualMarkers ar_track_alvar kinect_filtering ${PCL_LIBRARIES} ${TinyXML_LIBRARIES}) +ament_target_dependencies(individualMarkers ${dependencies}) add_executable(individualMarkersNoKinect nodes/IndividualMarkersNoKinect.cpp) -target_link_libraries(individualMarkersNoKinect ar_track_alvar ${catkin_LIBRARIES}) -add_dependencies(individualMarkersNoKinect ${PROJECT_NAME}_gencpp ${GENCPP_DEPS} ${PROJECT_NAME}_gencfg) +target_link_libraries(individualMarkersNoKinect ar_track_alvar) +ament_target_dependencies(individualMarkersNoKinect ${dependencies}) add_executable(trainMarkerBundle nodes/TrainMarkerBundle.cpp) -target_link_libraries(trainMarkerBundle ar_track_alvar ${catkin_LIBRARIES}) -add_dependencies(trainMarkerBundle ${PROJECT_NAME}_gencpp ${GENCPP_DEPS}) +target_link_libraries(trainMarkerBundle ar_track_alvar ${PCL_LIBRARIES} ${TinyXML_LIBRARIES}) +ament_target_dependencies(trainMarkerBundle ${dependencies}) + add_executable(findMarkerBundles nodes/FindMarkerBundles.cpp) -target_link_libraries(findMarkerBundles ar_track_alvar kinect_filtering medianFilter ${catkin_LIBRARIES}) -add_dependencies(findMarkerBundles ${PROJECT_NAME}_gencpp ${GENCPP_DEPS}) +target_link_libraries(findMarkerBundles ar_track_alvar kinect_filtering medianFilter ${PCL_LIBRARIES} ${TinyXML_LIBRARIES}) +ament_target_dependencies(findMarkerBundles ${dependencies}) add_executable(findMarkerBundlesNoKinect nodes/FindMarkerBundlesNoKinect.cpp) -target_link_libraries(findMarkerBundlesNoKinect ar_track_alvar ${catkin_LIBRARIES}) -add_dependencies(findMarkerBundlesNoKinect ${PROJECT_NAME}_gencpp ${GENCPP_DEPS}) +target_link_libraries(findMarkerBundlesNoKinect ar_track_alvar ${PCL_LIBRARIES} ${TinyXML_LIBRARIES}) +ament_target_dependencies(findMarkerBundlesNoKinect ${dependencies}) add_executable(createMarker src/SampleMarkerCreator.cpp) -target_link_libraries(createMarker ar_track_alvar ${catkin_LIBRARIES}) -add_dependencies(createMarker ${PROJECT_NAME}_gencpp ${GENCPP_DEPS}) +target_link_libraries(createMarker ar_track_alvar ${PCL_LIBRARIES} ${TinyXML_LIBRARIES}) +ament_target_dependencies(createMarker ${dependencies}) + + +ament_export_include_directories(include) +ament_export_libraries(ar_track_alvar) +ament_export_dependencies(OpenCV ar_track_alvar_msgs std_msgs rclcpp tf2_ros tf2 message_runtime image_transport sensor_msgs geometry_msgs visualization_msgs resource_retriever cv_bridge perception_pcl pcl_conversions) + install(TARGETS 
${ALVAR_TARGETS} ${KINECT_FILTERING_TARGETS} - ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} - RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} + ARCHIVE DESTINATION lib + LIBRARY DESTINATION lib + RUNTIME DESTINATION lib/${PROJECT_NAME} ) install(DIRECTORY include/${PROJECT_NAME}/ - DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} + DESTINATION include/${PROJECT_NAME} ) install(DIRECTORY launch bundles - DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/ + DESTINATION share/${PROJECT_NAME} ) -if (CATKIN_ENABLE_TESTING) - find_package(catkin REQUIRED COMPONENTS roslaunch rostest) + + +if(BUILD_TESTING) - file(GLOB LAUNCH_FILES launch/*.launch test/*.test) - foreach(LAUNCH_FILE ${LAUNCH_FILES}) - roslaunch_add_file_check(${LAUNCH_FILE} USE_TEST_DEPENDENCIES) - endforeach() + find_package(ament_lint_auto REQUIRED) + ament_lint_auto_find_test_dependencies() - catkin_download_test_data( - ${PROJECT_NAME}_4markers_tork.bag - http://download.ros.org/data/ar_track_alvar/ar_track_alvar_4markers_tork_2017-02-08-11-21-14.bag - # Workaround the issue http://answers.ros.org/question/253787/accessing-data-downloaded-via-catkin_download_test_data/ - # by downloading into source folder. - #DESTINATION ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_SHARE_DESTINATION}/test - DESTINATION ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_SHARE_DESTINATION}/test - MD5 627aa0316bbfe4334e06023d7c2b4087 - ) - add_rostest(test/marker_arg_config-basic.test DEPENDENCIES ${PROJECT_NAME}_4markers_tork.bag) - add_rostest(test/marker_arg_config-full.test DEPENDENCIES ${PROJECT_NAME}_4markers_tork.bag) - add_rostest(test/marker_param_config-basic.test DEPENDENCIES ${PROJECT_NAME}_4markers_tork.bag) - add_rostest(test/marker_param_config-full.test DEPENDENCIES ${PROJECT_NAME}_4markers_tork.bag) + set(_PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}") + if(WIN32 AND CMAKE_BUILD_TYPE STREQUAL "Debug") + set(PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE_DEBUG}") + endif() + + # Test bag playing (if this fails all other tests will fail) + add_launch_test("test/test_ar_track_alvar_bag.py" + PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}" + ) + + # Test Individual Markers in launch test + add_launch_test("test/test_ar_track_alvar_individual_markers.py" + PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}" + ) + + # Legacy Tests + add_launch_test("test/test_ar_legacy.py" + PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}" + ) + + # Test Launch Files + add_launch_test("test/test_launch_files.py" + PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}" + ) + endif() + +ament_package() \ No newline at end of file diff --git a/ar_track_alvar/include/ar_track_alvar/Alvar.h b/ar_track_alvar/include/ar_track_alvar/Alvar.h index 3418557..0e5da2c 100644 --- a/ar_track_alvar/include/ar_track_alvar/Alvar.h +++ b/ar_track_alvar/include/ar_track_alvar/Alvar.h @@ -29,44 +29,52 @@ * * \section Introduction * - * ALVAR is a software library for creating virtual and augmented reality (AR) applications. ALVAR has - * been developed by the VTT Technical Research Centre of Finland. ALVAR is released under the terms of - * the GNU Lesser General Public License, version 2.1, or (at your option) any later version. - * - * ALVAR is designed to be as flexible as possible. It offers high-level tools and methods for creating - * augmented reality applications with just a few lines of code. 
The library also includes interfaces - * for all of the low-level tools and methods, which makes it possible for the user to develop their - * own solutions using alternative approaches or completely new algorithms. - * - * ALVAR is currently provided on Windows and Linux operating systems and only depends on one third - * party library (OpenCV). ALVAR is independent of any graphical libraries and can be easily integrated - * into existing applications. The sample applications use GLUT and the demo applications use OpenSceneGraph. + * ALVAR is a software library for creating virtual and augmented reality (AR) + * applications. ALVAR has been developed by the VTT Technical Research Centre + * of Finland. ALVAR is released under the terms of the GNU Lesser General + * Public License, version 2.1, or (at your option) any later version. + * + * ALVAR is designed to be as flexible as possible. It offers high-level tools + * and methods for creating augmented reality applications with just a few lines + * of code. The library also includes interfaces for all of the low-level tools + * and methods, which makes it possible for the user to develop their own + * solutions using alternative approaches or completely new algorithms. + * + * ALVAR is currently provided on Windows and Linux operating systems and only + * depends on one third party library (OpenCV). ALVAR is independent of any + * graphical libraries and can be easily integrated into existing applications. + * The sample applications use GLUT and the demo applications use + * OpenSceneGraph. * * \section Features * - * - Detecting and tracking 2D markers (\e MarkerDetector). Currently two types of square matrix markers - * are supported (\e MarkerData and \e MarkerArtoolkit). Future marker types can easily be added. ALVAR - * keeps the \e Marker \e Pose estimation as accurate as possible. Furthermore, ALVAR uses some tracking - * heuristics to identify markers that are "too far" and to recover from occlusions in the multimarker - * case for example. - * - Using a setup of multiple markers for pose detection (\e MultiMarker). The marker setup coordinates - * can be set manually or they can be automatically deduced using various methods (\e MultiMarkerFiltered - * and \e MultiMarkerBundle). - * - Tools for calibrating \e Camera. Distorting and undistorting points, projecting points and finding - * exterior orientation using point-sets. + * - Detecting and tracking 2D markers (\e MarkerDetector). Currently two types + * of square matrix markers are supported (\e MarkerData and \e + * MarkerArtoolkit). Future marker types can easily be added. ALVAR keeps the \e + * Marker \e Pose estimation as accurate as possible. Furthermore, ALVAR uses + * some tracking heuristics to identify markers that are "too far" and to + * recover from occlusions in the multimarker case for example. + * - Using a setup of multiple markers for pose detection (\e MultiMarker). The + * marker setup coordinates can be set manually or they can be automatically + * deduced using various methods (\e MultiMarkerFiltered and \e + * MultiMarkerBundle). + * - Tools for calibrating \e Camera. Distorting and undistorting points, + * projecting points and finding exterior orientation using point-sets. * - Hiding markers from the view (\e BuildHideTexture and \e DrawTexture). - * - Several basic filters: \e FilterAverage, \e FilterMedian, \e FilterRunningAverage, - * \e FilterDoubleExponentialSmoothing. 
- * - \e Kalman filters for sensor fusion: \e Kalman Filter, \e Extended Kalman Filter and Unscented Kalman - * Filter (\e KalmanSensor, \e KalmanSensorEkf, \e KalmanEkf, \e UnscentedKalman). - * - Several methods for tracking using optical flow: \e TrackerPsa , \e TrackerPsaRot , \e TrackerFeatures - * and \e TrackerStat. + * - Several basic filters: \e FilterAverage, \e FilterMedian, \e + * FilterRunningAverage, \e FilterDoubleExponentialSmoothing. + * - \e Kalman filters for sensor fusion: \e Kalman Filter, \e Extended Kalman + * Filter and Unscented Kalman Filter (\e KalmanSensor, \e KalmanSensorEkf, \e + * KalmanEkf, \e UnscentedKalman). + * - Several methods for tracking using optical flow: \e TrackerPsa , \e + * TrackerPsaRot , \e TrackerFeatures and \e TrackerStat. * - etc... * * \section Platforms * * ALVAR is officially supported and tested on the following platforms. - * - Windows XP 32-bit, Microsoft Visual Studio 2005 (8.0), 2008 (9.0) and 2010 (10.0) + * - Windows XP 32-bit, Microsoft Visual Studio 2005 (8.0), 2008 (9.0) and 2010 + * (10.0) * - Linux 32-bit, GCC 4.3 and 4.4 * - Linux 64-bit, GCC 4.3 and 4.4 * @@ -96,61 +104,68 @@ * - OpenSceneGraph (http://www.openscenegraph.org) * * \example SampleCamCalib.cpp - * This is an example of how to use \e ProjPoints and \e Camera classes to perform camera calibration - * using a chessboard pattern. + * This is an example of how to use \e ProjPoints and \e Camera classes to + * perform camera calibration using a chessboard pattern. * * \example SampleCvTestbed.cpp - * This is an example of how to use the \e CvTestbed and \e Capture classes in order to make quick OpenCV - * prototype applications. + * This is an example of how to use the \e CvTestbed and \e Capture classes in + * order to make quick OpenCV prototype applications. * * \example SampleFilter.cpp - * This is an example of how to use various filters: \e FilterAverage, \e FilterMedian, - * \e FilterRunningAverage, \e FilterDoubleExponentialSmoothing and \e Kalman. + * This is an example of how to use various filters: \e FilterAverage, \e + * FilterMedian, \e FilterRunningAverage, \e FilterDoubleExponentialSmoothing + * and \e Kalman. * * \example SampleIntegralImage.cpp - * This is an example of how to use the \e IntegralImage and \e IntegralGradient classes for image - * gradient analysis. + * This is an example of how to use the \e IntegralImage and \e IntegralGradient + * classes for image gradient analysis. * * \example SampleLabeling.cpp - * This is an example of how to label images using \e LabelImage and \e MarchEdge. + * This is an example of how to label images using \e LabelImage and \e + * MarchEdge. * * \example SampleMarkerCreator.cpp - * This is an example that demonstrates the generation of \e MarkerData markers and saving the image - * using \e SaveMarkerImage. + * This is an example that demonstrates the generation of \e MarkerData markers + * and saving the image using \e SaveMarkerImage. * * \example SampleMarkerDetector.cpp - * This is an example that shows how to detect \e MarkerData markers and visualize them using\e GlutViewer. + * This is an example that shows how to detect \e MarkerData markers and + * visualize them using\e GlutViewer. * * \example SampleMarkerHide.cpp - * This is an example that shows how to detect \e MarkerData markers, visualize them using \e GlutViewer - * and hide them with \e BuildHideTexture and \e DrawTexture. 
+ * This is an example that shows how to detect \e MarkerData markers, visualize
+ * them using \e GlutViewer and hide them with \e BuildHideTexture and \e
+ * DrawTexture.
 *
 * \example SampleMarkerlessCreator.cpp
- * This is an example that demonstrates the use of FernImageDetector to train a Fern classifier.
+ * This is an example that demonstrates the use of FernImageDetector to train a
+ * Fern classifier.
 *
 * \example SampleMarkerlessDetector.cpp
- * This is an example that demonstrates the use of FernImageDetector to detect an image as a marker.
+ * This is an example that demonstrates the use of FernImageDetector to detect
+ * an image as a marker.
 *
 * \example SampleMultiMarker.cpp
- * This is an example that demonstrates the use of a preconfigured \e MultiMarker setup.
+ * This is an example that demonstrates the use of a preconfigured \e
+ * MultiMarker setup.
 *
 * \example SampleMultiMarkerBundle.cpp
- * This is an example that automatically recognising \e MultiMarker setups using \e MultiMarkerFiltered and
- * optimizes it with \e MultiMarkerBundle.
+ * This is an example that automatically recognises \e MultiMarker setups using
+ * \e MultiMarkerFiltered and optimizes them with \e MultiMarkerBundle.
 *
 * \example SampleOptimization.cpp
- * This is an example of how to use the \e Optimization class by fitting curves of increasing degree to
- * random data.
+ * This is an example of how to use the \e Optimization class by fitting curves
+ * of increasing degree to random data.
 *
 * \example SamplePointcloud.cpp
- * This is an example showing how to use \e SimpleSfM for tracking the environment using features in
- * addition to \e MultiMarker.
+ * This is an example showing how to use \e SimpleSfM for tracking the
+ * environment using features in addition to \e MultiMarker.
 *
 * \example SampleTrack.cpp
- * This is an example that shows how to perform tracking of the optical flow using \e TrackerPsa,
- * \e TrackerPsaRot, \e TrackerFeatures or \e TrackerStat.
+ * This is an example that shows how to perform tracking of the optical flow
+ * using \e TrackerPsa, \e TrackerPsaRot, \e TrackerFeatures or \e TrackerStat.
 */
-
+
 /**
 * \file Alvar.h
 *
@@ -159,20 +174,20 @@
 */
 
 #if defined(WIN32) && !defined(ALVAR_STATIC)
-  #ifdef ALVAR_BUILD
-    #define ALVAR_EXPORT __declspec(dllexport)
-  #else
-    #define ALVAR_EXPORT __declspec(dllimport)
-  #endif
+#ifdef ALVAR_BUILD
+#define ALVAR_EXPORT __declspec(dllexport)
 #else
-  #define ALVAR_EXPORT
+#define ALVAR_EXPORT __declspec(dllimport)
+#endif
+#else
+#define ALVAR_EXPORT
 #endif
 
 /**
 * \brief Main ALVAR namespace.
 */
-namespace alvar {
-
+namespace alvar
+{
 /**
 * \brief Major version number.
 */
@@ -193,35 +208,35 @@ static const int ALVAR_VERSION_PATCH = 0;
 *
 * The tag contains alpha, beta and release candidate versions.
 */
-static const char *ALVAR_VERSION_TAG = "";
+static const char* ALVAR_VERSION_TAG = "";
 
 /**
 * \brief Revision version string.
 *
 * The revision contains an identifier from the source control system.
 */
-static const char *ALVAR_VERSION_REVISION = "";
+static const char* ALVAR_VERSION_REVISION = "";
 
 /**
 * \brief Entire version string.
 */
-static const char *ALVAR_VERSION = "2.0.0";
+static const char* ALVAR_VERSION = "2.0.0";
 
 /**
 * \brief Entire version string without dots.
 */
-static const char *ALVAR_VERSION_NODOTS = "200";
+static const char* ALVAR_VERSION_NODOTS = "200";
 
 /**
 * \brief Date the library was built. 
*/ -static const char *ALVAR_DATE = "2012-06-20"; +static const char* ALVAR_DATE = "2012-06-20"; /** * \brief System the library was built on. */ -static const char *ALVAR_SYSTEM = "Linux 3.2.0-24-generic x86_64"; +static const char* ALVAR_SYSTEM = "Linux 3.2.0-24-generic x86_64"; -} +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/AlvarException.h b/ar_track_alvar/include/ar_track_alvar/AlvarException.h index 21b3c1a..99c971d 100644 --- a/ar_track_alvar/include/ar_track_alvar/AlvarException.h +++ b/ar_track_alvar/include/ar_track_alvar/AlvarException.h @@ -32,17 +32,19 @@ #include -namespace alvar { - +namespace alvar +{ /** * \brief ALVAR exception class. */ class AlvarException : public std::runtime_error { public: - AlvarException(const char *s) : std::runtime_error(s) { } + AlvarException(const char* s) : std::runtime_error(s) + { + } }; -} +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Bitset.h b/ar_track_alvar/include/ar_track_alvar/Bitset.h index 5cc9df8..35ba5d8 100644 --- a/ar_track_alvar/include/ar_track_alvar/Bitset.h +++ b/ar_track_alvar/include/ar_track_alvar/Bitset.h @@ -37,14 +37,16 @@ #include #include -namespace alvar { - +namespace alvar +{ /** * \brief \e Bitset is a basic class for handling bit sequences * - * The bits are stored internally using deque (See http://osdir.com/ml/gis.geos.devel/2006-04/msg00142.html). - * The bitset is assumed to have most significant bits left i.e. the push_back() methods add to the least - * significant end of the bit sequence. The usage is clarified by the following example. + * The bits are stored internally using deque (See + * http://osdir.com/ml/gis.geos.devel/2006-04/msg00142.html). The bitset is + * assumed to have most significant bits left i.e. the push_back() methods add + * to the least significant end of the bit sequence. The usage is clarified by + * the following example. * * \section Usage * \code @@ -59,101 +61,110 @@ namespace alvar { * b.Output(std::cout); // b contains now: 00000000 00000000 10001000 10001000 * \endcode */ -class ALVAR_EXPORT Bitset { +class ALVAR_EXPORT Bitset +{ protected: - std::deque bits; - + std::deque bits; + public: - /** \brief The length of the \e Bitset */ - int Length(); - /** \brief Output the bits to selected ostream - * \param os The output stream to be used for outputting e.g. std::cout - */ - std::ostream &Output(std::ostream &os) const; - /** \brief Clear the bits */ - void clear(); - /** \brief Push back one bit - * \param bit Boolean (true/false) to be pushed to the end of bit sequence. - */ - void push_back(const bool bit); - /** \brief Push back \e bit_count bits from 'byte' \e b - * \param b Unsigned character (8-bits) to be pushed to the end of bit sequence. - * \param bit_count Number of bits to be pushed (default/max is 8 bits) - */ - void push_back(const unsigned char b, const int bit_count=8); - /** \brief Push back \e bit_count bits from 'short' \e s - * \param s Unsigned short (16-bits) to be pushed to the end of bit sequence. - * \param bit_count Number of bits to be pushed (default/max is 16 bits) - */ - void push_back(const unsigned short s, const int bit_count=16); - /** \brief Push back \e bit_count bits from 'long' \e l - * \param l Unsigned long (32-bits) to be pushed to the end of bit sequence. 
- * \param bit_count Number of bits to be pushed (default/max is 32 bits) - */ - void push_back(const unsigned long l, const int bit_count=32); - /** \brief Push back meaningful bits from 'long' \e l - * \param l The meaningful bits of the given unsigned long (32-bits) are pushed to the end of bit sequence. - */ - void push_back_meaningful(const unsigned long l); - /** \brief Fill the \e Bitset with non-meaningful zeros - * \param bit_count Non-meaningful zeros are added until this given \e bit_count is reached. - */ - void fill_zeros_left(const size_t bit_count); - /** \brief Push back a string of characters - * \param s String of characters to be pushed to the end of bit sequence. - */ - void push_back(std::string s); - /** \brief Pop the front bit */ - bool pop_front(); - /** \brief Pop the back bit */ - bool pop_back(); - /** \brief Flip the selected bit - * \param pos The bit in this given position is flipped. - */ - void flip(size_t pos); - /** \brief The \e Bitset as a hex string */ - std::string hex(); - /** \brief The \e Bitset as 'unsigned long' */ - unsigned long ulong(); - /** \brief The \e Bitset as 'unsigned char' */ - unsigned char uchar(); - /** \brief The \e Bitset as 'deque' */ - inline std::deque& GetBits() - { - return bits; - } + /** \brief The length of the \e Bitset */ + int Length(); + /** \brief Output the bits to selected ostream + * \param os The output stream to be used for outputting e.g. std::cout + */ + std::ostream& Output(std::ostream& os) const; + /** \brief Clear the bits */ + void clear(); + /** \brief Push back one bit + * \param bit Boolean (true/false) to be pushed to the end of bit sequence. + */ + void push_back(const bool bit); + /** \brief Push back \e bit_count bits from 'byte' \e b + * \param b Unsigned character (8-bits) to be pushed to the end of bit + * sequence. \param bit_count Number of bits to be pushed (default/max is 8 + * bits) + */ + void push_back(const unsigned char b, const int bit_count = 8); + /** \brief Push back \e bit_count bits from 'short' \e s + * \param s Unsigned short (16-bits) to be pushed to the end of bit sequence. + * \param bit_count Number of bits to be pushed (default/max is 16 bits) + */ + void push_back(const unsigned short s, const int bit_count = 16); + /** \brief Push back \e bit_count bits from 'long' \e l + * \param l Unsigned long (32-bits) to be pushed to the end of bit sequence. + * \param bit_count Number of bits to be pushed (default/max is 32 bits) + */ + void push_back(const unsigned long l, const int bit_count = 32); + /** \brief Push back meaningful bits from 'long' \e l + * \param l The meaningful bits of the given unsigned long (32-bits) are + * pushed to the end of bit sequence. + */ + void push_back_meaningful(const unsigned long l); + /** \brief Fill the \e Bitset with non-meaningful zeros + * \param bit_count Non-meaningful zeros are added until this given \e + * bit_count is reached. + */ + void fill_zeros_left(const size_t bit_count); + /** \brief Push back a string of characters + * \param s String of characters to be pushed to the end of bit sequence. + */ + void push_back(std::string s); + /** \brief Pop the front bit */ + bool pop_front(); + /** \brief Pop the back bit */ + bool pop_back(); + /** \brief Flip the selected bit + * \param pos The bit in this given position is flipped. 
+ */ + void flip(size_t pos); + /** \brief The \e Bitset as a hex string */ + std::string hex(); + /** \brief The \e Bitset as 'unsigned long' */ + unsigned long ulong(); + /** \brief The \e Bitset as 'unsigned char' */ + unsigned char uchar(); + /** \brief The \e Bitset as 'deque' */ + inline std::deque& GetBits() + { + return bits; + } }; /** - * \brief An extended \e Bitset ( \e BitsetExt ) for handling e.g. Hamming encoding/decoding + * \brief An extended \e Bitset ( \e BitsetExt ) for handling e.g. Hamming + * encoding/decoding * - * This class is based on the basic \e Bitset. It provides additional features for Hamming coding - * (See http://en.wikipedia.org/wiki/Hamming_code). + * This class is based on the basic \e Bitset. It provides additional features + * for Hamming coding (See http://en.wikipedia.org/wiki/Hamming_code). * * The \e BitsetExt is used e.g by \e MarkerData */ -class ALVAR_EXPORT BitsetExt : public Bitset { +class ALVAR_EXPORT BitsetExt : public Bitset +{ protected: - bool verbose; - void hamming_enc_block(unsigned long block_len, std::deque::iterator &iter); - int hamming_dec_block(unsigned long block_len, std::deque::iterator &iter); + bool verbose; + void hamming_enc_block(unsigned long block_len, + std::deque::iterator& iter); + int hamming_dec_block(unsigned long block_len, + std::deque::iterator& iter); + public: - /** \brief Constructor */ - BitsetExt(); - /** \brief Constructor */ - BitsetExt(bool _verbose); - /** \brief Set the verbose/silent mode */ - void SetVerbose(bool _verbose); - /** \brief Count how many bits will be in the Bitset after hamming encoding */ - static int count_hamming_enc_len(int block_len, int dec_len); - /** \brief Count how many bits will be in the Bitset after hamming decoding */ - static int count_hamming_dec_len(int block_len, int enc_len); - /** \brief Hamming encoding 'in-place' using the defined block length */ - void hamming_enc(int block_len); - /** \brief Hamming decoding 'in-place' using the defined block length */ - int hamming_dec(int block_len); + /** \brief Constructor */ + BitsetExt(); + /** \brief Constructor */ + BitsetExt(bool _verbose); + /** \brief Set the verbose/silent mode */ + void SetVerbose(bool _verbose); + /** \brief Count how many bits will be in the Bitset after hamming encoding */ + static int count_hamming_enc_len(int block_len, int dec_len); + /** \brief Count how many bits will be in the Bitset after hamming decoding */ + static int count_hamming_dec_len(int block_len, int enc_len); + /** \brief Hamming encoding 'in-place' using the defined block length */ + void hamming_enc(int block_len); + /** \brief Hamming decoding 'in-place' using the defined block length */ + int hamming_dec(int block_len); }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Camera.h b/ar_track_alvar/include/ar_track_alvar/Camera.h index beca09d..89420e4 100644 --- a/ar_track_alvar/include/ar_track_alvar/Camera.h +++ b/ar_track_alvar/include/ar_track_alvar/Camera.h @@ -37,253 +37,312 @@ #include "FileFormat.h" #include -#include -#include -#include -#include -#include -#include -#include +#include "rclcpp/rclcpp.hpp" +#include +#include +#include +#include #include -namespace alvar { - -/** \brief Simple structure for collecting 2D and 3D points e.g. for camera calibration */ -struct ALVAR_EXPORT ProjPoints { - int width; - int height; - - /** \brief 3D object points corresponding with the detected 2D image points. 
*/
-  std::vector<CvPoint3D64f> object_points;
-  /** \brief Detected 2D object points
-   * If point_counts[0] == 10, then the first 10 points are detected in the first frame. If
-   * point_counts[1] == 6, then the next 6 of these points are
-   * detected in the next frame... etc.
-   */
-  std::vector<CvPoint2D64f> image_points;
-  /** \brief Vector indicating how many points are detected for each frame */
-  std::vector<int> point_counts;
-
-  /** \brief Reset \e object_points , \e image_points and \e point_counts */
-  void Reset();
-
-  /** \brief Add elements to \e object_points , \e image_points and \e point_counts using Chessboard pattern */
-  bool AddPointsUsingChessboard(IplImage *image, double etalon_square_size, int etalon_rows, int etalon_columns, bool visualize);
-
-  /** \brief Add elements to \e object_points , \e image_points and \e point_counts using detected markers */
-  bool AddPointsUsingMarkers(std::vector<PointDouble> &marker_corners, std::vector<PointDouble> &marker_corners_img, IplImage* image);
+namespace alvar
+{
+/** \brief Simple structure for collecting 2D and 3D points e.g. for camera
+ * calibration */
+struct ALVAR_EXPORT ProjPoints
+{
+  int width;
+  int height;
+
+  /** \brief 3D object points corresponding with the detected 2D image points.
+   */
+  std::vector<cv::Point3d> object_points;
+  /** \brief Detected 2D object points
+   * If point_counts[0] == 10, then the
+   * first 10 points are detected in the first frame. If
+   * point_counts[1] == 6, then the next 6 of these points are
+   * detected in the next frame... etc.
+   */
+  std::vector<cv::Point2d> image_points;
+  /** \brief Vector indicating how many points are detected for each frame */
+  std::vector<int> point_counts;
+
+  /** \brief Reset \e object_points , \e image_points and \e point_counts */
+  void Reset();
+
+  /** \brief Add elements to \e object_points , \e image_points and \e
+   * point_counts using Chessboard pattern */
+  bool AddPointsUsingChessboard(const cv::Mat& image, double etalon_square_size,
+                                int etalon_rows, int etalon_columns,
+                                bool visualize);
+
+  /** \brief Add elements to \e object_points , \e image_points and \e
+   * point_counts using detected markers */
+  bool AddPointsUsingMarkers(std::vector<PointDouble>& marker_corners,
+                             std::vector<PointDouble>& marker_corners_img,
+                             cv::Mat& image);
 };
-
 /**
- * \brief Simple \e Camera class for calculating distortions, orientation or projections with pre-calibrated camera
+ * \brief Simple \e Camera class for calculating distortions, orientation or
+ * projections with pre-calibrated camera
 */
-class ALVAR_EXPORT Camera {
-
+class ALVAR_EXPORT Camera
+{
 public:
-
-  CvMat calib_K; double calib_K_data[3][3];
-  CvMat calib_D; double calib_D_data[4];
-  int calib_x_res;
-  int calib_y_res;
-  int x_res;
-  int y_res;
-  bool getCamInfo_;
+  cv::Mat calib_K;
+  double calib_K_data[3][3];
+  cv::Mat calib_D;
+  double calib_D_data[4];
+  int calib_x_res;
+  int calib_y_res;
+  int x_res;
+  int y_res;
+  bool getCamInfo_;
 
 protected:
-  std::string cameraInfoTopic_;
-  sensor_msgs::CameraInfo cam_info_;
-  void camInfoCallback (const sensor_msgs::CameraInfoConstPtr &);
-  ros::Subscriber sub_;
-  ros::NodeHandle n_;
+  std::string cameraInfoTopic_;
+  sensor_msgs::msg::CameraInfo cam_info_;
+  // void camInfoCallback (const sensor_msgs::msg::CameraInfo::SharedPtr cam_info);
+  // rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr sub_;
 
 private:
-  bool LoadCalibXML(const char *calibfile);
-  bool LoadCalibOpenCV(const char *calibfile);
-  bool SaveCalibXML(const char *calibfile);
-  bool SaveCalibOpenCV(const char *calibfile);
+  bool LoadCalibXML(const char* calibfile);
+  bool LoadCalibOpenCV(const char* calibfile);
+  bool SaveCalibXML(const char* calibfile);
+  bool SaveCalibOpenCV(const char* calibfile);
 
 public:
-  /** \brief One of the two methods to make this class serializable by \e Serialization class */
-  std::string SerializeId() { return "camera"; };
-  /** \brief One of the two methods to make this class serializable by \e Serialization class
-   *
-   * You can serialize the \e Camera class using filename or any std::iostream
-   * as follows:
-   * \code
-   * alvar::Camera cam;
-   * cam.SetCalib("calib.xml", 320, 240);
-   * Serialization sero("calib1.xml");
-   * sero<<cam;
-   * \endcode
-   * \code
-   * alvar::Camera cam;
-   * Serialization seri("calib1.xml");
-   * seri>>cam;
-   * cam.SetRes(320, 240);
-   * \endcode
-   * \code
-   * std::stringstream ss;
-   * Serialization sero(ss);
-   * sero<<cam;
-   * \endcode
-   */
-  bool Serialize(Serialization *ser) {
-    if (!ser->Serialize(calib_x_res, "width")) return false;
-    if (!ser->Serialize(calib_y_res, "height")) return false;
-    if (!ser->Serialize(calib_K, "intrinsic_matrix")) return false;
-    if (!ser->Serialize(calib_D, "distortion")) return false;
-    return true;
-  }
-
-  /** \brief Constructor */
-  Camera();
-  Camera(ros::NodeHandle & n, std::string cam_info_topic);
-
-  /** Sets the intrinsic calibration */
-  void SetCameraInfo(const sensor_msgs::CameraInfo& camInfo);
-
-  /** \brief Get x-direction FOV in radians */
-  double GetFovX() {
-    return (2.0f * atan2(double(x_res) / 2.0f, (double)calib_K_data[0][0]));
-  }
-  /** \brief Get y-direction FOV in radians */
-  double GetFovY() {
-    return (2.0f * atan2(double(y_res) / 2.0f, (double)calib_K_data[1][1]));
-  }
-
-  void SetSimpleCalib(int _x_res, int _y_res, double f_fac=1.);
-
-  /** \brief Set the calibration file and the current resolution for which the calibration is adjusted to
-   * \param calibfile File to load.
-   * \param _x_res Width of images captured from the real camera.
-   * \param _y_res Height of images captured from the real camera.
-   * \param format FILE_FORMAT_OPENCV (default) or FILE_FORMAT_XML (see doc/Camera.xsd).
-   */
-  bool SetCalib(const char *calibfile, int _x_res, int _y_res,
-                FILE_FORMAT format = FILE_FORMAT_DEFAULT);
-
-  /** \brief Save the current calibration information to a file
-   * \param calibfile File to save.
-   * \param format FILE_FORMAT_OPENCV (default) or FILE_FORMAT_XML (see doc/Camera.xsd).
-   */
-  bool SaveCalib(const char *calibfile, FILE_FORMAT format = FILE_FORMAT_DEFAULT);
-
-  /** \brief Calibrate using the collected \e ProjPoints */
-  void Calibrate(ProjPoints &pp);
-
-  /** \brief If we have no calibration file we can still adjust the default calibration to current resolution */
-  void SetRes(int _x_res, int _y_res);
-
+  /** \brief One of the two methods to make this class serializable by \e
+   * Serialization class */
+  std::string SerializeId()
+  {
+    return "camera";
+  };
+  /** \brief One of the two methods to make this class serializable by \e
+   * Serialization class
+   *
+   * You can serialize the \e Camera class using filename or any std::iostream
+   * as follows:
+   * \code
+   * alvar::Camera cam;
+   * cam.SetCalib("calib.xml", 320, 240);
+   * Serialization sero("calib1.xml");
+   * sero<<cam;
+   * \endcode
+   * \code
+   * alvar::Camera cam;
+   * Serialization seri("calib1.xml");
+   * seri>>cam;
+   * cam.SetRes(320, 240);
+   * \endcode
+   * \code
+   * std::stringstream ss;
+   * Serialization sero(ss);
+   * sero<<cam;
+   * \endcode
+   */
+  bool Serialize(Serialization* ser)
+  {
+    if (!ser->Serialize(calib_x_res, "width"))
+      return false;
+    if (!ser->Serialize(calib_y_res, "height"))
+      return false;
+    if (!ser->Serialize(calib_K, "intrinsic_matrix"))
+      return false;
+    if (!ser->Serialize(calib_D, "distortion"))
+      return false;
+    return true;
+  }
+
+  /** \brief Constructor */
+  Camera();
+  // Camera(ros::NodeHandle& n, std::string cam_info_topic);
+
+  /** Sets the intrinsic calibration */
+  void SetCameraInfo(const sensor_msgs::msg::CameraInfo::SharedPtr cam_info);
+
+  /** \brief Get x-direction FOV in radians */
+  double GetFovX()
+  {
+    return (2.0f * atan2(double(x_res) / 2.0f, (double)calib_K_data[0][0]));
+  }
+  /** \brief Get y-direction FOV in radians */
+  double GetFovY()
+  {
+    return (2.0f * atan2(double(y_res) / 2.0f, (double)calib_K_data[1][1]));
+  }
+
+  void SetSimpleCalib(int _x_res, int _y_res, double f_fac = 1.);
+
+  /** \brief Set the calibration file and the current resolution for which the
+   * calibration is adjusted to
+   * \param calibfile File to load.
+   * \param _x_res Width of images captured from the real camera.
+   * \param _y_res Height of images captured from the real camera.
+   * \param format FILE_FORMAT_OPENCV (default) or FILE_FORMAT_XML (see
+   * doc/Camera.xsd).
+   */
+  bool SetCalib(const char* calibfile, int _x_res, int _y_res,
+                FILE_FORMAT format = FILE_FORMAT_DEFAULT);
+
+  /** \brief Save the current calibration information to a file
+   * \param calibfile File to save.
+   * \param format FILE_FORMAT_OPENCV (default) or FILE_FORMAT_XML (see
+   * doc/Camera.xsd).
+   */
+  bool SaveCalib(const char* calibfile,
+                 FILE_FORMAT format = FILE_FORMAT_DEFAULT);
+
+  /** \brief Calibrate using the collected \e ProjPoints */
+  void Calibrate(ProjPoints& pp);
+
+  /** \brief If we have no calibration file we can still adjust the default
+   * calibration to current resolution */
+  void SetRes(int _x_res, int _y_res);
+
-  /** \brief Get OpenGL matrix
-   * Generates the OpenGL projection matrix based on OpenCV intrinsic camera matrix K.
-   * \code
-   * 2*K[0][0]/width   2*K[0][1]/width    -(2*K[0][2]/width+1)   0
-   * 0                 2*K[1][1]/height    2*K[1][2]/height-1    0
-   * 0                 0                  -(f+n)/(f-n)          -2*f*n/(f-n)
-   * 0                 0                  -1                     0
-   * \endcode
-   *
-   * Note, that the sign of the element [2][0] is changed. It should be
-   * \code
-   * 2*K[0][2]/width+1
-   * \endcode
-   *
-   * The sign change is due to the fact that with OpenCV and OpenGL projection
-   * matrices both y and z should be mirrored. With other matrix elements
-   * the sign changes eliminate each other, but with principal point
-   * in x-direction we need to make the change by hand.
-   */
-  void GetOpenglProjectionMatrix(double proj_matrix[16], const int width, const int height, const float far_clip = 1000.0f, const float near_clip = 0.1f);
-
-  /** \brief Invert operation for \e GetOpenglProjectionMatrix */
-  void SetOpenglProjectionMatrix(double proj_matrix[16], const int width, const int height);
-
-  /** \brief Unapplys the lens distortion for points on image plane. */
-  void Undistort(std::vector<PointDouble>& points);
-
-  /** \brief Unapplys the lens distortion for one point on an image plane. */
-  void Undistort(PointDouble &point);
-
-  /** \brief Unapplys the lens distortion for one point on an image plane. */
-  void Undistort(CvPoint2D32f& point);
-
-  /** \brief Applys the lens distortion for one point on an image plane. */
-  void Distort(CvPoint2D32f& point);
-
-  /** \brief Applys the lens distortion for points on image plane. */
-  void Distort(std::vector<PointDouble>& points);
-
-  /** \brief Applys the lens distortion for points on image plane. */
-  void Distort(PointDouble &point);
-
-  /** \brief Calculate exterior orientation */
-  void CalcExteriorOrientation(std::vector<CvPoint3D64f>& pw, std::vector<CvPoint2D64f>& pi, Pose *pose);
-
-  /** \brief Calculate exterior orientation
-   */
-  void CalcExteriorOrientation(std::vector<CvPoint3D64f>& pw, std::vector<CvPoint2D64f>& pi,
-                               CvMat *rodriques, CvMat *tra);
-
-  /** \brief Calculate exterior orientation
-   */
-  void CalcExteriorOrientation(std::vector<CvPoint3D64f>& pw, std::vector<PointDouble>& pi,
-                               CvMat *rodriques, CvMat *tra);
-
-  /** \brief Calculate exterior orientation
-   */
-  void CalcExteriorOrientation(std::vector<CvPoint3D64f>& pw, std::vector<PointDouble>& pi, Pose *pose);
-
-  /** \brief Update existing pose based on new observations. Use (CV_32FC3 and CV_32FC2) for matrices. */
-  bool CalcExteriorOrientation(const CvMat* object_points, CvMat* image_points, Pose *pose);
-
-  /** \brief Update existing pose (in rodriques&tra) based on new observations. Use (CV_32FC3 and CV_32FC2) for matrices. */
-  bool CalcExteriorOrientation(const CvMat* object_points, CvMat* image_points, CvMat *rodriques, CvMat *tra);
-
-  /** \brief Project one point */
-  void ProjectPoint(const CvPoint3D64f pw, const Pose *pose, CvPoint2D64f &pi) const;
-
-  /** \brief Project one point */
-  void ProjectPoint(const CvPoint3D32f pw, const Pose *pose, CvPoint2D32f &pi) const;
-
-  /** \brief Project points */
-  void ProjectPoints(std::vector<CvPoint3D64f>& pw, Pose *pose, std::vector<CvPoint2D64f>& pi) const;
-
-  /** \brief Project points
-   */
-  void ProjectPoints(const CvMat* object_points, const CvMat* rotation_vector,
-                     const CvMat* translation_vector, CvMat* image_points) const;
-
-  /** \brief Project points
-   */
-  void ProjectPoints(const CvMat* object_points, double gl[16], CvMat* image_points) const;
-
-  /** \brief Project points */
-  void ProjectPoints(const CvMat* object_points, const Pose* pose, CvMat* image_points) const;
+  /** \brief Get OpenGL matrix
+   * Generates the OpenGL projection matrix based on OpenCV intrinsic camera
+   * matrix K.
+   * \code
+   * 2*K[0][0]/width   2*K[0][1]/width    -(2*K[0][2]/width+1)   0
+   * 0                 2*K[1][1]/height    2*K[1][2]/height-1    0
+   * 0                 0                  -(f+n)/(f-n)          -2*f*n/(f-n)
+   * 0                 0                  -1                     0
+   * \endcode
+   *
+   * Note that the sign of the element [2][0] is changed. It should be
+   * \code
+   * 2*K[0][2]/width+1
+   * \endcode
+   *
+   * The sign change is due to the fact that with OpenCV and OpenGL projection
+   * matrices both y and z should be mirrored. With other matrix elements
+   * the sign changes eliminate each other, but with principal point
+   * in x-direction we need to make the change by hand.
+   */
+  void GetOpenglProjectionMatrix(double proj_matrix[16], const int width,
+                                 const int height,
+                                 const float far_clip = 1000.0f,
+                                 const float near_clip = 0.1f);
+
+  /** \brief Invert operation for \e GetOpenglProjectionMatrix */
+  void SetOpenglProjectionMatrix(double proj_matrix[16], const int width,
+                                 const int height);
+
+  /** \brief Unapplies the lens distortion for points on image plane. */
+  void Undistort(std::vector<PointDouble>& points);
+
+  /** \brief Unapplies the lens distortion for one point on an image plane. */
+  void Undistort(PointDouble& point);
+
+  /** \brief Unapplies the lens distortion for one point on an image plane. */
+  void Undistort(cv::Point2f& point);
+
+  /** \brief Applies the lens distortion for one point on an image plane. */
+  void Distort(cv::Point2f& point);
+
+  /** \brief Applies the lens distortion for points on image plane. */
+  void Distort(std::vector<PointDouble>& points);
+
+  /** \brief Applies the lens distortion for points on image plane. 
*/ + void Distort(PointDouble& point); + + /** \brief Calculate exterior orientation + */ + void CalcExteriorOrientation(const std::vector& pw, + const std::vector& pi, + cv::Mat& rodriques, cv::Mat& tra) const; + + /** \brief Calculate exterior orientation + */ + void CalcExteriorOrientation(const std::vector& pw, + const std::vector& pi, + cv::Mat& rodriques, cv::Mat& tra) const; + + /** \brief Calculate exterior orientation + */ + void CalcExteriorOrientation(const std::vector& pw, + const std::vector& pi, + Pose* pose) const; + + /** \brief Project one point */ + void ProjectPoint(const cv::Point3d& pw, const Pose* pose, + cv::Point2d& pi) const; + + /** \brief Project one point */ + void ProjectPoint(const cv::Point3f& pw, const Pose* pose, + cv::Point2f& pi) const; + + /** \brief Project points */ + void ProjectPoints(std::vector& pw, Pose* pose, + std::vector& pi) const; + + /** \brief Project points + */ + void ProjectPoints(const cv::Mat& object_points, + const cv::Mat& rotation_vector, + const cv::Mat& translation_vector, + cv::Mat& image_points) const; + + /** \brief Project points + */ + void ProjectPoints(const cv::Mat& object_points, double gl[16], + cv::Mat& image_points) const; + + /** \brief Project points */ + void ProjectPoints(const cv::Mat& object_points, const Pose* pose, + cv::Mat& image_points) const; }; /** - * \brief Simple \e Homography class for finding and projecting points between two planes. + * \brief Simple \e Homography class for finding and projecting points between + * two planes. */ -struct ALVAR_EXPORT Homography { - double H_data[3][3]; - CvMat H; - - /** \brief Constructor */ - Homography(); - - /** \brief Find Homography for two point-sets */ - void Find(const std::vector& pw, const std::vector& pi); - - /** \brief Project points using the Homography */ - void ProjectPoints(const std::vector& from, std::vector& to); +struct ALVAR_EXPORT Homography +{ + cv::Mat H; + + /** \brief Constructor */ + Homography(); + + /** \brief Find Homography for two point-sets */ + void Find(const std::vector& pw, + const std::vector& pi); + + /** \brief Project points using the Homography */ + void ProjectPoints(const std::vector& from, + std::vector& to) const; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Capture.h b/ar_track_alvar/include/ar_track_alvar/Capture.h index b6f3f90..005a659 100644 --- a/ar_track_alvar/include/ar_track_alvar/Capture.h +++ b/ar_track_alvar/include/ar_track_alvar/Capture.h @@ -34,155 +34,179 @@ #include "CaptureDevice.h" #include "Util.h" -namespace alvar { - +namespace alvar +{ /** * \brief Capture interface that plugins must implement. * - * All plugins must implement the Capture interface. This is the class that implements - * all of the camera capture funtionality. This class is created by the CapturePlugin - * implementation. + * All plugins must implement the Capture interface. This is the class that + * implements all of the camera capture funtionality. This class is created by + * the CapturePlugin implementation. */ class ALVAR_EXPORT Capture { public: - /** - * \brief Constructor. - * - * \param captureDevice Information of which camera to create. - */ - Capture(const CaptureDevice captureDevice) - : mCaptureDevice(captureDevice) - , mXResolution(0) - , mYResolution(0) - , mIsCapturing(false) + /** + * \brief Constructor. + * + * \param captureDevice Information of which camera to create. 
+ */ + Capture(const CaptureDevice captureDevice) + : mCaptureDevice(captureDevice) + , mXResolution(0) + , mYResolution(0) + , mIsCapturing(false) + { + } + + /** + * \brief Destructor + */ + virtual ~Capture() + { + } + + /** + * \brief The camera information associated to this capture object. + */ + CaptureDevice captureDevice() + { + return mCaptureDevice; + } + + /** + * \brief The resolution along the x axis (horizontal). + */ + unsigned long xResolution() + { + return mXResolution; + } + + /** + * \brief The resolution along the y axis (vertical). + */ + unsigned long yResolution() + { + return mYResolution; + } + + /** + * \brief Test if the camera was properly initialized. + */ + bool isCapturing() + { + return mIsCapturing; + } + + /** + * \brief Set the resolution. + * + * \param xResolution The resolution along the x axis (horizontal). + * \param yResolution The resolution along the y axis (vertical). + */ + virtual void setResolution(const unsigned long xResolution, + const unsigned long yResolution) + { + } + + /** + * \brief Starts the camera capture. + * + * \return True if the camera was properly initialized, false otherwise. + */ + virtual bool start() = 0; + + /** + * \brief Stops the camera capture. + */ + virtual void stop() = 0; + + /** + * \brief Capture one image from the camera. + * + * Do not modify this image. + * + * \return The captured image. + */ + virtual cv::Mat& captureImage() = 0; + + /** + * \brief Save camera settings to a file. + * + * \param filename The filename to write to. + * \return True if the settings were sucessfully saved, false otherwise. + */ + virtual bool saveSettings(std::string filename) + { + if (!isCapturing()) { + return false; } - /** - * \brief Destructor - */ - virtual ~Capture() {} - - /** - * \brief The camera information associated to this capture object. - */ - CaptureDevice captureDevice() {return mCaptureDevice;} - - /** - * \brief The resolution along the x axis (horizontal). - */ - unsigned long xResolution() {return mXResolution;} - - /** - * \brief The resolution along the y axis (vertical). - */ - unsigned long yResolution() {return mYResolution;} - - /** - * \brief Test if the camera was properly initialized. - */ - bool isCapturing() {return mIsCapturing;} - - /** - * \brief Set the resolution. - * - * \param xResolution The resolution along the x axis (horizontal). - * \param yResolution The resolution along the y axis (vertical). - */ - virtual void setResolution(const unsigned long xResolution, const unsigned long yResolution) + Serialization serialization(filename); + try + { + serialization << (*this); + } + catch (...) + { + return false; + } + return true; + } + + /** + * \brief Load camera settings from a file. + * + * \param filename The filename to read from. + * \return True if the settings were sucessfully loaded, false otherwise. + */ + virtual bool loadSettings(std::string filename) + { + if (!isCapturing()) { + return false; } - /** - * \brief Starts the camera capture. - * - * \return True if the camera was properly initialized, false otherwise. - */ - virtual bool start() = 0; - - /** - * \brief Stops the camera capture. - */ - virtual void stop() = 0; - - /** - * \brief Capture one image from the camera. - * - * Do not modify this image. - * - * \return The captured image. - */ - virtual IplImage *captureImage() = 0; - - /** - * \brief Save camera settings to a file. - * - * \param filename The filename to write to. - * \return True if the settings were sucessfully saved, false otherwise. 
- */ - virtual bool saveSettings(std::string filename) { - if (!isCapturing()) { - return false; - } - - Serialization serialization(filename); - try { - serialization << (*this); - } - catch (...) { - return false; - } - return true; - } - - /** - * \brief Load camera settings from a file. - * - * \param filename The filename to read from. - * \return True if the settings were sucessfully loaded, false otherwise. - */ - virtual bool loadSettings(std::string filename) { - if (!isCapturing()) { - return false; - } - - Serialization serialization(filename); - try { - serialization >> (*this); - } - catch (...) { - return false; - } - return true; - } - - /** - * \brief Show the settings dialog of the camera. - * \return True if the settings dialog was shown, false otherwise. - */ - virtual bool showSettingsDialog() = 0; - - /** - * \brief The identification of the class for serialization. - */ - virtual std::string SerializeId() = 0; - - /** - * \brief Performs serialization of the class members and configuration. - * - * \param serialization The Serialization object. - * \return True if the serialization of the class was successful, false otherwise. - */ - virtual bool Serialize(Serialization *serialization) = 0; + Serialization serialization(filename); + try + { + serialization >> (*this); + } + catch (...) + { + return false; + } + return true; + } + + /** + * \brief Show the settings dialog of the camera. + * \return True if the settings dialog was shown, false otherwise. + */ + virtual bool showSettingsDialog() = 0; + + /** + * \brief The identification of the class for serialization. + */ + virtual std::string SerializeId() = 0; + + /** + * \brief Performs serialization of the class members and configuration. + * + * \param serialization The Serialization object. + * \return True if the serialization of the class was successful, false + * otherwise. + */ + virtual bool Serialize(Serialization* serialization) = 0; protected: - CaptureDevice mCaptureDevice; - unsigned long mXResolution; - unsigned long mYResolution; - bool mIsCapturing; + CaptureDevice mCaptureDevice; + unsigned long mXResolution; + unsigned long mYResolution; + bool mIsCapturing; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/CaptureDevice.h b/ar_track_alvar/include/ar_track_alvar/CaptureDevice.h index aae94ba..2beb21b 100644 --- a/ar_track_alvar/include/ar_track_alvar/CaptureDevice.h +++ b/ar_track_alvar/include/ar_track_alvar/CaptureDevice.h @@ -34,56 +34,58 @@ #include -namespace alvar { - +namespace alvar +{ /** * \brief CaptureDevice holder for camera information. * - * CaptureDevice contains the desired backend, the id and description of the camera. + * CaptureDevice contains the desired backend, the id and description of the + * camera. */ class ALVAR_EXPORT CaptureDevice { public: - /** - * \brief Constructor. - * - * \param captureType The type of capture backend. - * \param id The id of the camera. - * \param description A human readable description of the camera. - */ - CaptureDevice(const std::string captureType, const std::string id, const std::string description = ""); + /** + * \brief Constructor. + * + * \param captureType The type of capture backend. + * \param id The id of the camera. + * \param description A human readable description of the camera. + */ + CaptureDevice(const std::string captureType, const std::string id, + const std::string description = ""); - /** - * \brief Destructor. - */ - ~CaptureDevice(); + /** + * \brief Destructor. 
+   */
+  ~CaptureDevice();

-  /**
-   * \brief The type of capture backend.
-   */
-  std::string captureType() const;
+  /**
+   * \brief The type of capture backend.
+   */
+  std::string captureType() const;

-  /**
-   * \brief The id of the camera.
-   */
-  std::string id() const;
+  /**
+   * \brief The id of the camera.
+   */
+  std::string id() const;

-  /**
-   * \brief The description of the camera.
-   */
-  std::string description() const;
+  /**
+   * \brief The description of the camera.
+   */
+  std::string description() const;

-  /**
-   * \brief A unique name consisting of the capture type and the id.
-   */
-  std::string uniqueName() const;
+  /**
+   * \brief A unique name consisting of the capture type and the id.
+   */
+  std::string uniqueName() const;

 private:
-  std::string mCaptureType;
-  std::string mId;
-  std::string mDescription;
+  std::string mCaptureType;
+  std::string mId;
+  std::string mDescription;
 };

-} // namespace alvar
+}  // namespace alvar

 #endif
diff --git a/ar_track_alvar/include/ar_track_alvar/CaptureFactory.h b/ar_track_alvar/include/ar_track_alvar/CaptureFactory.h
index 2e4833a..28e3c49 100644
--- a/ar_track_alvar/include/ar_track_alvar/CaptureFactory.h
+++ b/ar_track_alvar/include/ar_track_alvar/CaptureFactory.h
@@ -38,101 +38,115 @@
 #include "Platform.h"
 #include "Util.h"

-namespace alvar {
-
+namespace alvar
+{
 class CaptureFactoryPrivate;

 /**
  * \brief CaptureFactory for creating Capture classes.
  *
  * CaptureFactory is a singleton that creates Capture classes used to perform
- * camera acquisition. Different backends are implemented as dynamicly loaded plugins
- * so that platform dependancies can be handled at runtime and not compile time.
+ * camera acquisition. Different backends are implemented as dynamically loaded
+ * plugins so that platform dependencies can be handled at runtime and not
+ * compile time.
 */
 class ALVAR_EXPORT CaptureFactory
 {
 public:
-  /**
-   * \brief The singleton instance of CaptureFactory.
-   */
-  static CaptureFactory *instance();
-
-  /**
-   * \brief Vector of strings.
-   */
-  typedef std::vector<std::string> CapturePluginVector;
-
-  /**
-   * \brief Enumerate capture plugins currently available.
-   *
-   * This method should be used carfully since it will load all the available plugins.
-   *
-   * \return A vector of strings identifying all currently available capture plugins.
-   */
-  CapturePluginVector enumeratePlugins();
-
-  /**
-   * \brief Vector of CaptureDevices.
-   */
-  typedef std::vector<CaptureDevice> CaptureDeviceVector;
-
-  /**
-   * \brief Enumerate capture devices currently available.
-   *
-   * This method should be used carfully since it will load all the known plugins
-   * and call their respective enumeration methods. The vector of CaptureDevice
-   * objects returned should be cached.
-   *
-   * \param captureType Force the enumeration of only one type of plugin.
-   * \return A vector of CaptureDevice objects that are currently available.
-   */
-  CaptureDeviceVector enumerateDevices(const std::string &captureType = "");
-
-  /**
-   * \brief Create Capture class. Transfers onwership to the caller.
-   *
-   * If the needed backend plugin is not loaded, an attempt is made to load it and
-   * an instance of it is kept such that it is available for subsequent calls.
-   *
-   * \param captureDevice CaptureDevice object specifying the plugin to use.
-   * \return A new Capture class for which the caller takes ownership.
-   */
-  Capture *createCapture(const CaptureDevice captureDevice);
+  /**
+   * \brief The singleton instance of CaptureFactory.
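+   *
+   * A minimal usage sketch (picking the first enumerated device is an
+   * assumption; a real system should inspect the returned vector):
+   * \code
+   * CaptureFactory::CaptureDeviceVector devices =
+   *     CaptureFactory::instance()->enumerateDevices();
+   * if (!devices.empty()) {
+   *   Capture* capture = CaptureFactory::instance()->createCapture(devices[0]);
+   *   // ... use the capture ...
+   *   delete capture;  // the caller takes ownership
+   * }
+   * \endcode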
+   */
+  static CaptureFactory* instance();
+
+  /**
+   * \brief Vector of strings.
+   */
+  typedef std::vector<std::string> CapturePluginVector;
+
+  /**
+   * \brief Enumerate capture plugins currently available.
+   *
+   * This method should be used carefully since it will load all the available
+   * plugins.
+   *
+   * \return A vector of strings identifying all currently available capture
+   * plugins.
+   */
+  CapturePluginVector enumeratePlugins();
+
+  /**
+   * \brief Vector of CaptureDevices.
+   */
+  typedef std::vector<CaptureDevice> CaptureDeviceVector;
+
+  /**
+   * \brief Enumerate capture devices currently available.
+   *
+   * This method should be used carefully since it will load all the known
+   * plugins and call their respective enumeration methods. The vector of
+   * CaptureDevice objects returned should be cached.
+   *
+   * \param captureType Force the enumeration of only one type of plugin.
+   * \return A vector of CaptureDevice objects that are currently available.
+   */
+  CaptureDeviceVector enumerateDevices(const std::string& captureType = "");
+
+  /**
+   * \brief Create Capture class. Transfers ownership to the caller.
+   *
+   * If the needed backend plugin is not loaded, an attempt is made to load it
+   * and an instance of it is kept such that it is available for subsequent
+   * calls.
+   *
+   * \param captureDevice CaptureDevice object specifying the plugin to use.
+   * \return A new Capture class for which the caller takes ownership.
+   */
+  Capture* createCapture(const CaptureDevice captureDevice);

 protected:
-  /**
-   * \brief Destructor.
-   */
-  ~CaptureFactory();
+  /**
+   * \brief Destructor.
+   */
+  ~CaptureFactory();

 private:
-  /**
-   * \brief CaptureFactoryDestroyer for deleting the CaptureFactory singleton.
-   */
-  class CaptureFactoryDestroyer
+  /**
+   * \brief CaptureFactoryDestroyer for deleting the CaptureFactory singleton.
+ */ + class CaptureFactoryDestroyer + { + public: + CaptureFactoryDestroyer(CaptureFactory* instance = NULL) + : mInstance(instance) + { + } + ~CaptureFactoryDestroyer() + { + delete mInstance; + } + void set(CaptureFactory* instance) { - public: - CaptureFactoryDestroyer(CaptureFactory *instance = NULL) : mInstance(instance) {} - ~CaptureFactoryDestroyer() {delete mInstance;} - void set(CaptureFactory *instance) {mInstance = instance;} - private: - CaptureFactory *mInstance; - }; - - // private constructors and assignment operator for singleton implementation - CaptureFactory(); - CaptureFactory(const CaptureFactory&); - CaptureFactory &operator=(const CaptureFactory&); - - // static members for singleton implementation - static CaptureFactory *mInstance; - static Mutex mMutex; - static CaptureFactoryDestroyer mDestroyer; - - // members - CaptureFactoryPrivate *d; + mInstance = instance; + } + + private: + CaptureFactory* mInstance; + }; + + // private constructors and assignment operator for singleton implementation + CaptureFactory(); + CaptureFactory(const CaptureFactory&); + CaptureFactory& operator=(const CaptureFactory&); + + // static members for singleton implementation + static CaptureFactory* mInstance; + static Mutex mMutex; + static CaptureFactoryDestroyer mDestroyer; + + // members + CaptureFactoryPrivate* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/CaptureFactory_private.h b/ar_track_alvar/include/ar_track_alvar/CaptureFactory_private.h index f3506d1..41f1470 100644 --- a/ar_track_alvar/include/ar_track_alvar/CaptureFactory_private.h +++ b/ar_track_alvar/include/ar_track_alvar/CaptureFactory_private.h @@ -30,38 +30,38 @@ #include "Plugin.h" -namespace alvar { - +namespace alvar +{ class CapturePlugin; class CaptureFactoryPrivate { public: - CaptureFactoryPrivate(); - ~CaptureFactoryPrivate(); + CaptureFactoryPrivate(); + ~CaptureFactoryPrivate(); - void setupPluginPaths(); - void parseEnvironmentVariable(const std::string &variable); - std::string pluginPrefix(); - std::string pluginExtension(); + void setupPluginPaths(); + void parseEnvironmentVariable(const std::string& variable); + std::string pluginPrefix(); + std::string pluginExtension(); - void loadPlugins(); - void loadPlugin(const std::string &captureType); - void loadPlugin(const std::string &captureType, const std::string &filename); - CapturePlugin *getPlugin(const std::string &captureType); + void loadPlugins(); + void loadPlugin(const std::string& captureType); + void loadPlugin(const std::string& captureType, const std::string& filename); + CapturePlugin* getPlugin(const std::string& captureType); - typedef std::vector PluginPathsVector; - PluginPathsVector mPluginPaths; - std::string mPluginPrefix; - std::string mPluginPostfix; + typedef std::vector PluginPathsVector; + PluginPathsVector mPluginPaths; + std::string mPluginPrefix; + std::string mPluginPostfix; - bool mLoadedAllPlugins; - typedef std::map PluginMap; - PluginMap mPluginMap; - typedef std::map CapturePluginMap; - CapturePluginMap mCapturePluginMap; + bool mLoadedAllPlugins; + typedef std::map PluginMap; + PluginMap mPluginMap; + typedef std::map CapturePluginMap; + CapturePluginMap mCapturePluginMap; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/CapturePlugin.h b/ar_track_alvar/include/ar_track_alvar/CapturePlugin.h index 3ba0ca3..b8ed826 100644 --- a/ar_track_alvar/include/ar_track_alvar/CapturePlugin.h +++ 
b/ar_track_alvar/include/ar_track_alvar/CapturePlugin.h
@@ -33,56 +33,56 @@
 #include "Alvar.h"
 #include "CaptureDevice.h"

-namespace alvar {
-
+namespace alvar
+{
 /**
  * \brief CapturePlugin interface that plugins must implement.
  *
- * All plugins must implement the CapturePlugin interface. When the plugin is loaded,
- * the CapturePlugin implementation will register itself with the CaptureFactory.
+ * All plugins must implement the CapturePlugin interface. When the plugin is
+ * loaded, the CapturePlugin implementation will register itself with the
+ * CaptureFactory.
 */
 class ALVAR_EXPORT CapturePlugin
 {
 public:
-  /**
-   * \brief Constructor.
-   *
-   * \param captureType A unique identifier for the capture plugin.
-   */
-  CapturePlugin(const std::string &captureType)
-    : mCaptureType(captureType)
-  {
-  }
+  /**
+   * \brief Constructor.
+   *
+   * \param captureType A unique identifier for the capture plugin.
+   */
+  CapturePlugin(const std::string& captureType) : mCaptureType(captureType)
+  {
+  }

-  /**
-   * \brief Destructor.
-   */
-  virtual ~CapturePlugin() {};
+  /**
+   * \brief Destructor.
+   */
+  virtual ~CapturePlugin(){};

-  /**
-   * \brief Vector of CaptureDevices.
-   */
-  typedef std::vector<CaptureDevice> CaptureDeviceVector;
+  /**
+   * \brief Vector of CaptureDevices.
+   */
+  typedef std::vector<CaptureDevice> CaptureDeviceVector;

-  /**
-   * \brief Enumerate capture devices currently available.
-   *
-   * \return A vector of CaptureDevice objects that are currently available.
-   */
-  virtual CaptureDeviceVector enumerateDevices() = 0;
+  /**
+   * \brief Enumerate capture devices currently available.
+   *
+   * \return A vector of CaptureDevice objects that are currently available.
+   */
+  virtual CaptureDeviceVector enumerateDevices() = 0;

-  /**
-   * \brief Create Capture class. Transfers onwership to the caller.
-   *
-   * \param captureDevice Information of which camera to create.
-   * \return A new Capture class for which the caller takes ownership.
-   */
-  virtual Capture *createCapture(const CaptureDevice captureDevice) = 0;
+  /**
+   * \brief Create Capture class. Transfers ownership to the caller.
+   *
+   * \param captureDevice Information of which camera to create.
+   * \return A new Capture class for which the caller takes ownership.
+   */
+  virtual Capture* createCapture(const CaptureDevice captureDevice) = 0;

 protected:
-  std::string mCaptureType;
+  std::string mCaptureType;
 };

-} // namespace alvar
+}  // namespace alvar

 #endif
diff --git a/ar_track_alvar/include/ar_track_alvar/ConnectedComponents.h b/ar_track_alvar/include/ar_track_alvar/ConnectedComponents.h
index 8228e86..b60d719 100644
--- a/ar_track_alvar/include/ar_track_alvar/ConnectedComponents.h
+++ b/ar_track_alvar/include/ar_track_alvar/ConnectedComponents.h
@@ -35,105 +35,104 @@
 #include "Line.h"
 #include "Camera.h"

-namespace alvar {
-
+namespace alvar
+{
 /**
  * \brief Connected components labeling methods.
-*/
+ */
 enum ALVAR_EXPORT LabelingMethod
 {
-  CVSEQ
+  CVSEQ
 };

 /**
  * \brief Base class for labeling connected components from binary image.
-*/
+ */
 class ALVAR_EXPORT Labeling
 {
+protected:
+  Camera* cam;
+  int thresh_param1, thresh_param2;

-protected :
-
-  Camera *cam;
-  int thresh_param1, thresh_param2;
-
-public :
-
-  /**
-   * \brief Pointer to grayscale image that is thresholded for labeling.
-   */
-  IplImage *gray;
-  /**
-   * \brief Pointer to binary image that is then labeled.
-   */
-  IplImage *bw;
-
-  /**
-   * \brief Vector of 4-length vectors where the corners of detected blobs are stored.
- */ - std::vector > blob_corners; - - /** - * \brief Two alternatives for thresholding the gray image. ADAPT (adaptive threshold) is only supported currently. - */ - enum ThresholdMethod - { - THRESH, - ADAPT - }; - - /** Constructor */ - Labeling(); - - /** Destructor*/ - virtual ~Labeling(); - - /** - * \brief Sets the camera object that is used to correct lens distortions. - */ - void SetCamera(Camera* camera) {cam = camera;} - - /** - * \brief Labels image and filters blobs to obtain square-shaped objects from the scene. - */ - virtual void LabelSquares(IplImage* image, bool visualize=false) = 0; - - bool CheckBorder(CvSeq* contour, int width, int height); - - void SetThreshParams(int param1, int param2) - { - thresh_param1 = param1; - thresh_param2 = param2; - } +public: + /** + * \brief Pointer to grayscale image that is thresholded for labeling. + */ + cv::Mat gray; + /** + * \brief Pointer to binary image that is then labeled. + */ + cv::Mat bw; + + /** + * \brief Vector of 4-length vectors where the corners of detected blobs are + * stored. + */ + std::vector> blob_corners; + + /** + * \brief Two alternatives for thresholding the gray image. ADAPT (adaptive + * threshold) is only supported currently. + */ + enum ThresholdMethod + { + THRESH, + ADAPT + }; + + /** Constructor */ + Labeling(); + + /** Destructor*/ + virtual ~Labeling(); + + /** + * \brief Sets the camera object that is used to correct lens distortions. + */ + void SetCamera(Camera* camera) + { + cam = camera; + } + + /** + * \brief Labels image and filters blobs to obtain square-shaped objects from + * the scene. + */ + virtual void LabelSquares(cv::Mat& image, bool visualize = false) = 0; + + bool CheckBorder(const std::vector& contour, int width, + int height); + + void SetThreshParams(int param1, int param2) + { + thresh_param1 = param1; + thresh_param2 = param2; + } }; /** * \brief Labeling class that uses OpenCV routines to find connected components. -*/ + */ class ALVAR_EXPORT LabelingCvSeq : public Labeling { - -protected : - - int _n_blobs; - int _min_edge; - int _min_area; - bool detect_pose_grayscale; - - CvMemStorage* storage; +protected: + int _n_blobs; + int _min_edge; + int _min_area; + bool detect_pose_grayscale; public: + LabelingCvSeq(); + ~LabelingCvSeq(); - LabelingCvSeq(); - ~LabelingCvSeq(); - - void SetOptions(bool _detect_pose_grayscale=false); + void SetOptions(bool _detect_pose_grayscale = false); - void LabelSquares(IplImage* image, bool visualize=false); + void LabelSquares(cv::Mat& image, bool visualize = false); - // TODO: Releases memory inside, cannot return CvSeq* - CvSeq* LabelImage(IplImage* image, int min_size, bool approx=false); + std::vector> LabelImage(cv::Mat& image, int min_size, + bool approx = false); }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Container3d.h b/ar_track_alvar/include/ar_track_alvar/Container3d.h index 2a24270..72a70d1 100644 --- a/ar_track_alvar/include/ar_track_alvar/Container3d.h +++ b/ar_track_alvar/include/ar_track_alvar/Container3d.h @@ -35,80 +35,107 @@ #include #include -namespace alvar { - -template class Container3d; +namespace alvar +{ +template +class Container3d; -/** \brief Functor class for \e Container3d \e Sort() to sort the search base using distance to specified origin. */ +/** \brief Functor class for \e Container3d \e Sort() to sort the search base + * using distance to specified origin. 
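+ *
+ * For example, a sketch that orders the search space by distance to the
+ * origin (the int payload and the sample points are assumptions):
+ * \code
+ * Container3d<int> c3d;
+ * c3d.Add(cvPoint3D32f(5, 0, 0), 1);
+ * c3d.Add(cvPoint3D32f(1, 0, 0), 2);
+ * Container3dSortDist<int> sort_dist(c3d, cvPoint3D32f(0, 0, 0));
+ * c3d.Sort(sort_dist);  // nearest to the origin comes first
+ * \endcode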
*/ template -class Container3dSortDist { +class Container3dSortDist +{ protected: - CvPoint3D32f orig; - Container3d &container; + CvPoint3D32f orig; + Container3d& container; + public: - Container3dSortDist(Container3d &_container, const CvPoint3D32f _orig) : container(_container), orig(_orig) {} - bool operator()(size_t i1, size_t i2) - { - float x1 = container[i1].first.x-orig.x, x2 = container[i2].first.x-orig.x; - float y1 = container[i1].first.y-orig.y, y2 = container[i2].first.y-orig.y; - float z1 = container[i1].first.z-orig.z, z2 = container[i2].first.z-orig.z; - float d1 = x1*x1 + y1*y1 + z1*z1; - float d2 = x2*x2 + y2*y2 + z2*z2; - return d1& _container, const CvPoint3D32f _orig) + : container(_container), orig(_orig) + { + } + bool operator()(size_t i1, size_t i2) + { + float x1 = container[i1].first.x - orig.x, + x2 = container[i2].first.x - orig.x; + float y1 = container[i1].first.y - orig.y, + y2 = container[i2].first.y - orig.y; + float z1 = container[i1].first.z - orig.z, + z2 = container[i2].first.z - orig.z; + float d1 = x1 * x1 + y1 * y1 + z1 * z1; + float d2 = x2 * x2 + y2 * y2 + z2 * z2; + return d1 < d2; + } }; -/** \brief Functor class for \e Container3d \e Sort() to sort the search base using content size. */ +/** \brief Functor class for \e Container3d \e Sort() to sort the search base + * using content size. */ template -class Container3dSortSize { +class Container3dSortSize +{ protected: - Container3d &container; + Container3d& container; + public: - Container3dSortSize(Container3d &_container) : container(_container) {} - bool operator()(size_t i1, size_t i2) - { - return (container[i1].second->size() > container[i2].second->size()); - } + Container3dSortSize(Container3d& _container) : container(_container) + { + } + bool operator()(size_t i1, size_t i2) + { + return (container[i1].second->size() > container[i2].second->size()); + } }; -/** \brief Functor class for \e Container3d \e Limit() to limit the search space with distance */ +/** \brief Functor class for \e Container3d \e Limit() to limit the search space + * with distance */ template -class Container3dLimitDist { +class Container3dLimitDist +{ protected: - Container3d &container; - CvPoint3D32f orig; - float dist_limit; + Container3d& container; + CvPoint3D32f orig; + float dist_limit; + public: - Container3dLimitDist(Container3d &_container, const CvPoint3D32f _orig, float _dist_limit) - : container(_container),orig(_orig),dist_limit(_dist_limit) {} - bool operator()(size_t i) const { - float x = container[i].first.x-orig.x; - float y = container[i].first.y-orig.y; - float z = container[i].first.z-orig.z; - float d = x*x + y*y + z*z; - if (d <= dist_limit*dist_limit) return true; - return false; - } + Container3dLimitDist(Container3d& _container, const CvPoint3D32f _orig, + float _dist_limit) + : container(_container), orig(_orig), dist_limit(_dist_limit) + { + } + bool operator()(size_t i) const + { + float x = container[i].first.x - orig.x; + float y = container[i].first.y - orig.y; + float z = container[i].first.z - orig.z; + float d = x * x + y * y + z * z; + if (d <= dist_limit * dist_limit) + return true; + return false; + } }; /** - * \brief Generic container to store any information in 3D (features, photos, ...) + * \brief Generic container to store any information in 3D (features, photos, + * ...) * - * You can store any information in 3D using this container. Each element in the container has - * an unique id that it can be referenced with using \e operator[](). 
The indices are from 0
- * to \e size(). You can find specific index using \e GetIndex(Iterator &iter) or \e GetIndex(T *p) .
+ * You can store any information in 3D using this container. Each element in
+ * the container has a unique id that it can be referenced with using \e
+ * operator[](). The indices are from 0 to \e size(). You can find specific
+ * index using \e GetIndex(Iterator &iter) or \e GetIndex(T *p) .
 *
- * In addition the Container3d contains also a 'search space' that can be iterated through
- * using \e begin() and \e end(). This 'search space' can be limited using \e Limit() ,
- * sorted using \e Sort() and reseted using \e ResetSearchSpace(). You specify what to limit/sort
- * using specified functors. In ALVAR there exists functors \e Container3dLimitDist ,
- * \e Container3dSortSize and \e Container3dSortDist . But you can quite well make your own
- * versions (see example below).
- *
- * The implementation is optimized for a situation where there are a lot of searches between every
- * time the search space is limited/ordered. This is the reason we use vector containers
- * internally. The idea is that later we will improve the class by providing more ways for limiting
- * and sorting the search space; for example Frustum culling.
+ * In addition the Container3d contains also a 'search space' that can be
+ * iterated through using \e begin() and \e end(). This 'search space' can be
+ * limited using \e Limit() , sorted using \e Sort() and reset using \e
+ * ResetSearchSpace(). You specify what to limit/sort using specified functors.
+ * In ALVAR there exist functors \e Container3dLimitDist , \e
+ * Container3dSortSize and \e Container3dSortDist . But you can quite well make
+ * your own versions (see example below).
+ *
+ * The implementation is optimized for a situation where there are a lot of
+ * searches between every time the search space is limited/ordered. This is the
+ * reason we use vector containers internally. The idea is that later we will
+ * improve the class by providing more ways for limiting and sorting the search
+ * space; for example Frustum culling.
 *
 * Usage:
 *
@@ -130,14 +157,14 @@ class Container3dLimitDist {
 *     int x_min, x_max;
 *     Container3d<int> &container;
 * public:
- *     Container3dLimitX(Container3d<int> &_container, int _x_min, int _x_max)
+ *   Container3dLimitX(Container3d<int> &_container, int _x_min, int _x_max)
 *     : container(_container),x_min(_x_min),x_max(_x_max) {}
 *     bool operator()(size_t i1) const {
- *         if ((container[i1].first.x >= x_min) && (container[i1].first.x <= x_max)) return true;
- *         return false;
+ *     if ((container[i1].first.x >= x_min) && (container[i1].first.x <= x_max))
+ *       return true;
+ *     return false;
 *     }
 * };
- *
+ *
 * ...
 * Container3d<int> c3d;
 * c3d.Add(CvPoint3D32f(0,0,0), 0);
@@ -165,120 +192,175 @@
 template <class T>
 class Container3d
 {
- public:
-  /** \brief \e node_type for storing data. 3D-position is paired with the data content. */
-  typedef std::pair<CvPoint3D32f, T> node_type;
- protected:
-  /** \brief the actual data in using node_type: pair */
-  std::vector<node_type> data;
-  /** \brief Possibly limited set of indices for \e data in somehow "optimal" search order */
-  std::vector<size_t> search_space;
+public:
+  /** \brief \e node_type for storing data. 3D-position is paired with the data
+   * content.
*/ + typedef std::pair node_type; + +protected: + /** \brief the actual data in using node_type: pair */ + std::vector data; + /** \brief Possibly limited set of indices for \e data in somehow "optimal" + * search order */ + std::vector search_space; + +public: + /** \brief Add \e _data in the container and associate it with 3D position \e + * _pos */ + void Add(const CvPoint3D32f& _pos, const T& _data) + { + data.push_back(node_type(_pos, _data)); + search_space.push_back(data.size() - 1); + } + /** \brief Clear the container */ + void Clear() + { + data.clear(); + search_space.clear(); + } + /** \brief Reset the search space to contain whole data */ + void ResetSearchSpace() + { + search_space.resize(data.size()); + for (size_t i = 0; i < search_space.size(); i++) + { + search_space[i] = i; + } + } + /** \brief Erase item in the container */ + void Erase(size_t index) + { + typename std::vector::iterator iter_d; + iter_d = data.begin(); + for (size_t i = 0; i < index; i++) + iter_d++; + data.erase(iter_d); + ResetSearchSpace(); + } + /** \brief Sort using external Compare method */ + template + int Sort(Compare comp) + { + stable_sort(search_space.begin(), search_space.end(), comp); + return search_space.size(); + } - public: - /** \brief Add \e _data in the container and associate it with 3D position \e _pos */ - void Add(const CvPoint3D32f& _pos, const T& _data){ - data.push_back(node_type(_pos, _data)); - search_space.push_back(data.size()-1); - } - /** \brief Clear the container */ - void Clear() { - data.clear(); - search_space.clear(); - } - /** \brief Reset the search space to contain whole data */ - void ResetSearchSpace() { - search_space.resize(data.size()); - for (size_t i=0; i::iterator iter_d; - iter_d = data.begin(); - for (size_t i=0; i - int Sort(Compare comp) { - stable_sort(search_space.begin(), search_space.end(), comp); - return search_space.size(); - } + /** \brief Limit the search space with external limitation */ + template + int Limit(Test test) + { + std::vector::iterator iter; + for (iter = search_space.begin(); iter != search_space.end();) + { + if (!test(*iter)) + { + iter = search_space.erase(iter); + } + else + { + iter++; + } + } + return search_space.size(); + } - /** \brief Limit the search space with external limitation */ - template - int Limit(Test test) { - std::vector::iterator iter; - for (iter = search_space.begin(); iter != search_space.end();) { - if (!test(*iter)) { - iter = search_space.erase(iter); - } else { - iter++; - } - } - return search_space.size(); - } + /** \brief Iterator for going through the items in \e Container3d in the + * specified order + * + * The idea is that the content in \e Container3d can be sorted and limited in + * different ways. After sorting/limiting the content the \e iterator (\e + * Begin() and \e End() ) can be used for accessing the data items in optimal + * order. + */ + class Iterator : public std::iterator + { + protected: + Container3d* container; + std::vector::iterator iter; - /** \brief Iterator for going through the items in \e Container3d in the specified order - * - * The idea is that the content in \e Container3d can be sorted and limited in different - * ways. After sorting/limiting the content the \e iterator (\e Begin() and \e End() ) can - * be used for accessing the data items in optimal order. 
- */ - class Iterator : public std::iterator - { - protected: - Container3d *container; - std::vector::iterator iter; - public: - Iterator() {} - Iterator(Container3d *_container, std::vector::iterator _iter) : container(_container), iter(_iter) {} - node_type &operator*() const { return container->data[*iter]; } - node_type *operator->() const { return &(operator*()); } - virtual Iterator& operator++() { ++iter; return *this; } - bool operator==(const Iterator& _m) const { return iter == _m.iter; } - bool operator!=(const Iterator& _m) const { return iter != _m.iter; } - size_t GetIndex() { return *iter; } - }; + public: + Iterator() + { + } + Iterator(Container3d* _container, std::vector::iterator _iter) + : container(_container), iter(_iter) + { + } + node_type& operator*() const + { + return container->data[*iter]; + } + node_type* operator->() const + { + return &(operator*()); + } + virtual Iterator& operator++() + { + ++iter; + return *this; + } + bool operator==(const Iterator& _m) const + { + return iter == _m.iter; + } + bool operator!=(const Iterator& _m) const + { + return iter != _m.iter; + } + size_t GetIndex() + { + return *iter; + } + }; - /** \brief Provides an iterator pointing to the beginning of the limited/sorted 3D content */ - Iterator begin() { - return Iterator(this, search_space.begin()); - } + /** \brief Provides an iterator pointing to the beginning of the + * limited/sorted 3D content */ + Iterator begin() + { + return Iterator(this, search_space.begin()); + } - /** \brief Provides an iterator pointing to the end of the limited/sorted 3D content */ - Iterator end() { - return Iterator(this, search_space.end()); - } + /** \brief Provides an iterator pointing to the end of the limited/sorted 3D + * content */ + Iterator end() + { + return Iterator(this, search_space.end()); + } - /** \brief Get number of items that can be referenced using \e operator[]() */ - size_t size() const { return data.size(); } + /** \brief Get number of items that can be referenced using \e operator[]() */ + size_t size() const + { + return data.size(); + } - /** \brief Get absolute reference usable with \e operator[]() based on the iterator */ - size_t GetIndex(Iterator &iter) { - return iter.GetIndex(); - } + /** \brief Get absolute reference usable with \e operator[]() based on the + * iterator */ + size_t GetIndex(Iterator& iter) + { + return iter.GetIndex(); + } - /** \brief Instead of \e Iterator we can use also absolute references for data with \e operator[]() */ - node_type &operator[](size_t index) { - return data[index]; - } + /** \brief Instead of \e Iterator we can use also absolute references for data + * with \e operator[]() */ + node_type& operator[](size_t index) + { + return data[index]; + } - /** \brief Get absolute reference usable with \e operator[]() based on the content */ - size_t GetIndex(T *p) { - size_t i=0; - for (; i #include #include -#include "cv.h" -#include "highgui.h" +#include +#include #include "CaptureFactory.h" using namespace alvar; @@ -17,7 +18,7 @@ using namespace alvar; * Usage: * \code * #include "CvTestbed.h" - * + * * void videocallback(IplImage *image) * { * static IplImage *img_gray=NULL; @@ -25,22 +26,23 @@ using namespace alvar; * assert(image); * if (img_gray == NULL) { * // Following image is toggled visible using key '0' - * img_gray = CvTestbed::Instance().CreateImageWithProto("img_gray", image, 0, 1); + * img_gray = CvTestbed::Instance().CreateImageWithProto("img_gray", + *image, 0, 1); * } * cvCvtColor(image, img_gray, CV_RGB2GRAY); * 
// TODO: Do your image operations * } * * void main() { - * CvTestbed::Instance().SetVideoCallback(videocallback); // Set video callback - * CvTestbed::Instance().StartVideo("movie.avi"); // Video from avi + * CvTestbed::Instance().SetVideoCallback(videocallback); // Set video + *callback CvTestbed::Instance().StartVideo("movie.avi"); // Video from avi * // CvTestbed::Instance().StartVideo(0) // Video from camera 0 * } * \endcode * * In addition to handling video input from avi or from camera * \e CvTestbed has also functions for creating and showing - * (and automatically releasing) IplImage's. + * (and automatically releasing) IplImage's. * * The \e CvTestbed is made into a simple Meyers singleton: * \code @@ -58,115 +60,134 @@ using namespace alvar; * } * \endcode * - * The only instance of the class is accessed using public + * The only instance of the class is accessed using public * Instance()-interface. All possible constructors and destructors - * are hidden. If more complex singleton approach is needed + * are hidden. If more complex singleton approach is needed * refer to Loki library or the book "Modern C++ Design". */ -class CvTestbed { +class CvTestbed +{ protected: - Capture *cap; - /** \brief Hidden constructor for Singleton */ - CvTestbed(); - /** \brief Hidden copy constructor for Singleton */ - CvTestbed(const CvTestbed&); - /** \brief Hidden copy operator for Singleton */ - CvTestbed& operator=(const CvTestbed&); - /** \brief Hidden destructor for Singleton */ - ~CvTestbed(); - /** \brief Boolean indicating are we still running. We exit from the \e WaitKeys when this is false. */ - bool running; + Capture* cap; + /** \brief Hidden constructor for Singleton */ + CvTestbed(); + /** \brief Hidden copy constructor for Singleton */ + CvTestbed(const CvTestbed&); + /** \brief Hidden copy operator for Singleton */ + CvTestbed& operator=(const CvTestbed&); + /** \brief Hidden destructor for Singleton */ + ~CvTestbed(); + /** \brief Boolean indicating are we still running. We exit from the \e + * WaitKeys when this is false. */ + bool running; + + /** \brief Pointer for the user-defined videocallback. */ + void (*videocallback)(cv::Mat& image); + /** \brief Pointer for the user-defined KEYcallback. */ + int (*keycallback)(int key); + /** \brief The window title for the video view. */ + std::string wintitle; + /** \brief The filename if we are reading an AVI file. */ + std::string filename; + /** \brief Image structure to store the images internally */ + struct Image + { + cv::Mat ipl; + std::string title; + bool visible; + bool release_at_exit; + Image(cv::Mat _ipl, std::string _title, bool _visible, + bool _release_at_exit) + : ipl(std::move(_ipl)) + , title(_title) + , visible(_visible) + , release_at_exit(_release_at_exit) + { + } + }; + /** \brief Vector of images stored internally */ + std::vector images; - /** \brief Pointer for the user-defined videocallback. */ - void (*videocallback)(IplImage *image); - /** \brief Pointer for the user-defined KEYcallback. */ - int (*keycallback)(int key); - /** \brief The window title for the video view. */ - std::string wintitle; - /** \brief The filename if we are reading an AVI file. 
*/ - std::string filename; - /** \brief Image structure to store the images internally */ - struct Image { - IplImage *ipl; - std::string title; - bool visible; - bool release_at_exit; - Image(IplImage *_ipl, std::string _title, bool _visible, bool _release_at_exit) - :ipl(_ipl),title(_title),visible(_visible),release_at_exit(_release_at_exit) {} - }; - /** \brief Vector of images stored internally */ - std::vector images; + /** \brief Video callback called for every frame. This calls user-defined + * videocallback if one exists. */ + static void default_videocallback(cv::Mat& image); + /** \brief \e WaitKeys contains the main loop. */ + void WaitKeys(); + /** \brief \e ShowVisibleImages is called from the videocallback. This shows + * the internally stored images which have the visible-flag on */ + void ShowVisibleImages(); - /** \brief Video callback called for every frame. This calls user-defined videocallback if one exists. */ - static void default_videocallback(IplImage *image); - /** \brief \e WaitKeys contains the main loop. */ - void WaitKeys(); - /** \brief \e ShowVisibleImages is called from the videocallback. This shows the internally stored images which have the visible-flag on */ - void ShowVisibleImages(); public: - //CameraDescription camera_description; - /** - * \brief The one and only instance of CvTestbed is accessed using - * \e CvTestbed::Instance() - */ - static CvTestbed& Instance(); - /** - * \brief Set the videocallback function that will be called for - * every frame. - */ - void SetVideoCallback(void (*_videocallback)(IplImage *image)); - /** - * \brief Sets the keyboard callback function that will be called - * when keyboard is pressed. - * - * The callback should return 0 if it doesn't want the default keyboard - * actions to be made. By default keys '0'-'9' executes - * \e ToggleImageVisible for first 10 IplImage's, while any other key - * will Exit the program. - */ - void SetKeyCallback(int (*_keycallback)(int key)); - /** - * \brief Start video input from given capture device - * \param cap The capture device. If NULL a default capture device is created. 
- */ - bool StartVideo(Capture *_cap, const char *_wintitle=0/*"Capture"*/); - /** - * \brief Stop video - */ - void StopVideo() { running = false; } - /** - * \brief Sets an existing IplImage to be stored with the given title - * \param title Title for the image - * \param ipl The IplImage to be stored - * \param release_at_exit Boolean indicating should \e CvTestbed automatically release the image at exit - */ - size_t SetImage(const char *title, IplImage *ipl, bool release_at_exit=false); - /** - * \brief Creates an image with given size, depth and channels and stores - * it with a given 'title' (see \e CvTestbed::SetImage) - */ - IplImage *CreateImage(const char *title, CvSize size, int depth, int channels); - /** - * \brief Creates an image based on the given prototype and stores - * it with a given 'title' (see \e CvTestbed::SetImage) - */ - IplImage *CreateImageWithProto(const char *title, IplImage *proto, int depth=0, int channels=0); - /** - * \brief Get a pointer for the stored image based on index number - */ - IplImage *GetImage(size_t index); - /** - * \brief Get an index number of the stored image based on title - */ - size_t GetImageIndex(const char *title); - /** - * \brief Get a pointer for the stored image based on title - */ - IplImage *GetImage(const char *title); - /** - * \brief Toggle the visibility of the stored image - */ - bool ToggleImageVisible(size_t index, int flags=1); + // CameraDescription camera_description; + /** + * \brief The one and only instance of CvTestbed is accessed using + * \e CvTestbed::Instance() + */ + static CvTestbed& Instance(); + /** + * \brief Set the videocallback function that will be called for + * every frame. + */ + void SetVideoCallback(void (*_videocallback)(cv::Mat& image)); + /** + * \brief Sets the keyboard callback function that will be called + * when keyboard is pressed. + * + * The callback should return 0 if it doesn't want the default keyboard + * actions to be made. By default keys '0'-'9' executes + * \e ToggleImageVisible for first 10 IplImage's, while any other key + * will Exit the program. + */ + void SetKeyCallback(int (*_keycallback)(int key)); + /** + * \brief Start video input from given capture device + * \param cap The capture device. If NULL a default capture device is created. 
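+   *
+   * A minimal sketch (\e device and \e videocallback are assumed to exist;
+   * see the class-level example above, and note that passing NULL creates a
+   * default capture device):
+   * \code
+   * Capture* cap = CaptureFactory::instance()->createCapture(device);
+   * CvTestbed::Instance().SetVideoCallback(videocallback);
+   * CvTestbed::Instance().StartVideo(cap, "Capture");
+   * \endcode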
+ */ + bool StartVideo(Capture* _cap, const char* _wintitle = 0 /*"Capture"*/); + /** + * \brief Stop video + */ + void StopVideo() + { + running = false; + } + /** + * \brief Sets an existing IplImage to be stored with the given title + * \param title Title for the image + * \param ipl The IplImage to be stored + * \param release_at_exit Boolean indicating should \e CvTestbed automatically + * release the image at exit + */ + size_t SetImage(const char* title, const cv::Mat& ipl, + bool release_at_exit = false); + /** + * \brief Creates an image with given size, depth and channels and stores + * it with a given 'title' (see \e CvTestbed::SetImage) + */ + cv::Mat CreateImage(const char* title, cv::Size size, int depth, + int channels); + /** + * \brief Creates an image based on the given prototype and stores + * it with a given 'title' (see \e CvTestbed::SetImage) + */ + cv::Mat CreateImageWithProto(const char* title, cv::Mat& proto, int depth = 0, + int channels = 0); + /** + * \brief Get a pointer for the stored image based on index number + */ + cv::Mat GetImage(size_t index); + /** + * \brief Get an index number of the stored image based on title + */ + size_t GetImageIndex(const char* title); + /** + * \brief Get a pointer for the stored image based on title + */ + cv::Mat GetImage(const char* title); + /** + * \brief Toggle the visibility of the stored image + */ + bool ToggleImageVisible(size_t index, int flags = 1); }; #endif diff --git a/ar_track_alvar/include/ar_track_alvar/DirectoryIterator.h b/ar_track_alvar/include/ar_track_alvar/DirectoryIterator.h old mode 100755 new mode 100644 index a56086c..4520d6e --- a/ar_track_alvar/include/ar_track_alvar/DirectoryIterator.h +++ b/ar_track_alvar/include/ar_track_alvar/DirectoryIterator.h @@ -34,57 +34,58 @@ #include -namespace alvar { - +namespace alvar +{ class DirectoryIteratorPrivate; /** * \brief DirectoryIterator for iterating over files and directories. * - * DirectoryIterator class for iterating over files and directories on the filesystem. + * DirectoryIterator class for iterating over files and directories on the + * filesystem. */ class ALVAR_EXPORT DirectoryIterator { public: - /** - * \brief Constructor. - * - * \param path The path on the filesystem to iterate. - */ - DirectoryIterator(const std::string &path); + /** + * \brief Constructor. + * + * \param path The path on the filesystem to iterate. + */ + DirectoryIterator(const std::string& path); - /** - * \brief Destructor. - */ - ~DirectoryIterator(); + /** + * \brief Destructor. + */ + ~DirectoryIterator(); - /** - * \brief Verifies if another entry exist in the directory. - */ - bool hasNext(); + /** + * \brief Verifies if another entry exist in the directory. + */ + bool hasNext(); - /** - * \brief Advances the iterator and returns the name of the next entry. - */ - std::string next(); + /** + * \brief Advances the iterator and returns the name of the next entry. + */ + std::string next(); - /** - * \brief Returns the name of the current entry. - */ - std::string currentEntry(); + /** + * \brief Returns the name of the current entry. + */ + std::string currentEntry(); - /** - * \brief Returns the path of the current entry. - * - * This appends the name of the current entry to the path of the directory that - * is being iterated. - */ - std::string currentPath(); + /** + * \brief Returns the path of the current entry. + * + * This appends the name of the current entry to the path of the directory + * that is being iterated. 
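+   *
+   * For example, a sketch that walks a directory (the path is an assumption):
+   * \code
+   * DirectoryIterator iterator("/usr/lib/");
+   * while (iterator.hasNext()) {
+   *   iterator.next();
+   *   std::string full_path = iterator.currentPath();
+   * }
+   * \endcode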
+ */ + std::string currentPath(); private: - DirectoryIteratorPrivate *d; + DirectoryIteratorPrivate* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/DirectoryIterator_private.h b/ar_track_alvar/include/ar_track_alvar/DirectoryIterator_private.h old mode 100755 new mode 100644 index ea3dbf7..ec7009a --- a/ar_track_alvar/include/ar_track_alvar/DirectoryIterator_private.h +++ b/ar_track_alvar/include/ar_track_alvar/DirectoryIterator_private.h @@ -26,25 +26,25 @@ #include -namespace alvar { - +namespace alvar +{ class DirectoryIteratorPrivateData; class DirectoryIteratorPrivate { public: - DirectoryIteratorPrivate(const std::string &path); - ~DirectoryIteratorPrivate(); - bool hasNext(); - std::string next(); - void skip(); - - DirectoryIteratorPrivateData *d; - std::string mDirectory; - std::string mEntry; - bool mValid; + DirectoryIteratorPrivate(const std::string& path); + ~DirectoryIteratorPrivate(); + bool hasNext(); + std::string next(); + void skip(); + + DirectoryIteratorPrivateData* d; + std::string mDirectory; + std::string mEntry; + bool mValid; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Draw.h b/ar_track_alvar/include/ar_track_alvar/Draw.h index c2b7978..2122ac8 100644 --- a/ar_track_alvar/include/ar_track_alvar/Draw.h +++ b/ar_track_alvar/include/ar_track_alvar/Draw.h @@ -36,140 +36,165 @@ #include "Camera.h" #include "Line.h" #include +#include -namespace alvar { - +namespace alvar +{ /** \brief Draws the bounding box of a connected component (Blob). * \param image Pointer to the destination image. * \param points Vector of points that determine the bounding box. - * \param color Use CV_RGB(red,green,blue) to determine the color of the bounding box. - * \param label A label to show in the center of the bounding box. + * \param color Use CV_RGB(red,green,blue) to determine the color of the + * bounding box. \param label A label to show in the center of the bounding box. 
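+ *
+ * For example, a sketch that boxes two points (\e image is an assumed 8-bit
+ * BGR cv::Mat):
+ * \code
+ * std::vector<PointDouble> points;
+ * points.push_back(PointDouble(10, 10));
+ * points.push_back(PointDouble(60, 40));
+ * DrawBB(image, points, CV_RGB(255, 0, 0), "blob");
+ * \endcode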
*/ -template -void inline DrawBB(IplImage *image, const std::vector& points, CvScalar color, std::string label="") +template +void inline DrawBB(cv::Mat& image, const std::vector& points, + const cv::Scalar& color, const std::string& label = "") { - if (points.size() < 2) { - return; - } - PointType minimum = PointType(image->width, image->height); - PointType maximum = PointType(0, 0); - for (int i = 0; i < (int)points.size(); ++i) { - PointType current = points.at(i); - if (current.x < minimum.x) minimum.x = current.x; - if (current.x > maximum.x) maximum.x = current.x; - if (current.y < minimum.y) minimum.y = current.y; - if (current.y > maximum.y) maximum.y = current.y; - } - cvLine(image, cvPoint((int)minimum.x,(int)minimum.y), cvPoint((int)maximum.x,(int)minimum.y), color); - cvLine(image, cvPoint((int)maximum.x,(int)minimum.y), cvPoint((int)maximum.x,(int)maximum.y), color); - cvLine(image, cvPoint((int)maximum.x,(int)maximum.y), cvPoint((int)minimum.x,(int)maximum.y), color); - cvLine(image, cvPoint((int)minimum.x,(int)maximum.y), cvPoint((int)minimum.x,(int)minimum.y), color); - if (!label.empty()) { - CvFont font; - cvInitFont(&font, 0, 0.5, 0.5, 0); - cvPutText(image, label.c_str(), cvPoint((int)minimum.x+1,(int)minimum.y+2), &font, CV_RGB(255,255,0)); - } + if (points.size() < 2) + { + return; + } + PointType minimum = PointType(image.cols, image.rows); + PointType maximum = PointType(0, 0); + for (int i = 0; i < (int)points.size(); ++i) + { + PointType current = points.at(i); + if (current.x < minimum.x) + minimum.x = current.x; + if (current.x > maximum.x) + maximum.x = current.x; + if (current.y < minimum.y) + minimum.y = current.y; + if (current.y > maximum.y) + maximum.y = current.y; + } + cv::line(image, cv::Point((int)minimum.x, (int)minimum.y), + cv::Point((int)maximum.x, (int)minimum.y), color); + cv::line(image, cv::Point((int)maximum.x, (int)minimum.y), + cv::Point((int)maximum.x, (int)maximum.y), color); + cv::line(image, cv::Point((int)maximum.x, (int)maximum.y), + cv::Point((int)minimum.x, (int)maximum.y), color); + cv::line(image, cv::Point((int)minimum.x, (int)maximum.y), + cv::Point((int)minimum.x, (int)minimum.y), color); + if (!label.empty()) + { + cv::putText(image, label, cv::Point((int)minimum.x + 1, (int)minimum.y + 2), + 0, 0.5, CV_RGB(255, 255, 0)); + } } -/** \brief Draws a set of points. - * \param image Pointer to the destination image. - * \param points Array of CvPoints to be visualzed. - * \param color Use CV_RGB(red,green,blue) to determine the color. -*/ -void ALVAR_EXPORT DrawPoints(IplImage *image, const std::vector& points, CvScalar color); - /** \brief Draws lines between consecutive points stored in vector (polyline). - * \param image Pointer to the destination image. - * \param points Vector of points that determine the polyline. - * \param color Use CV_RGB(red,green,blue) to determine the color. - * \param loop If true, the polyline is closed. -*/ -template -void inline DrawLines(IplImage *image, const std::vector& points, CvScalar color, bool loop=true) + * \param image Pointer to the destination image. + * \param points Vector of points that determine the polyline. + * \param color Use CV_RGB(red,green,blue) to determine the color. + * \param loop If true, the polyline is closed. 
+ */
+template <class PointType>
+void inline DrawLines(cv::Mat& image, const std::vector<PointType>& points,
+                      const cv::Scalar& color, bool loop = true)
 {
-  for(unsigned i = 1; i < points.size(); ++i)
-    cvLine(image, cvPoint((int)points[i-1].x,(int)points[i-1].y), cvPoint((int)points[i].x,(int)points[i].y), color);
-  if (loop) {
-    cvLine(image, cvPoint((int)points[points.size()-1].x,(int)points[points.size()-1].y), cvPoint((int)points[0].x,(int)points[0].y), color);
-  }
+  for (unsigned i = 1; i < points.size(); ++i)
+    cv::line(image, cv::Point((int)points[i - 1].x, (int)points[i - 1].y),
+             cv::Point((int)points[i].x, (int)points[i].y), color);
+  if (loop)
+  {
+    cv::line(image,
+             cv::Point((int)points[points.size() - 1].x,
+                       (int)points[points.size() - 1].y),
+             cv::Point((int)points[0].x, (int)points[0].y), color);
+  }
 }

 /** \brief Draws a line.
- * \param image Pointer to the destination image.
- * \param line Line struct to be drawn.
- * \param color Use CV_RGB(red,green,blue) to determine the color.
-*/
-void ALVAR_EXPORT DrawLine(IplImage* image, const Line line, CvScalar color = CV_RGB(0,255,0));
+ * \param image Pointer to the destination image.
+ * \param line Line struct to be drawn.
+ * \param color Use CV_RGB(red,green,blue) to determine the color.
+ */
+void ALVAR_EXPORT DrawLine(cv::Mat& image, const Line line,
+                           const cv::Scalar& color = CV_RGB(0, 255, 0));

 /** \brief Draws points of the contour that is obtained by \e Labeling class.
- * \param image Pointer to the destination image.
- * \param contour Controur sequence.
- * \param color Use CV_RGB(red,green,blue) to determine the color.
-*/
-void ALVAR_EXPORT DrawPoints(IplImage* image, const CvSeq* contour, CvScalar color = CV_RGB(255,0,0));
-
-
-/** \brief Draws circles to the contour points that are obtained by \e Labeling class.
- * \param image Pointer to the destination image.
- * \param contour Controur sequence.
- * \param radius Circle radius in pixels.
- * \param color Use CV_RGB(red,green,blue) to determine the color.
-*/
-void ALVAR_EXPORT DrawCircles(IplImage* image, const CvSeq* contour, int radius, CvScalar color = CV_RGB(255,0,0));
+ * \param image Pointer to the destination image.
+ * \param contour Contour sequence.
+ * \param color Use CV_RGB(red,green,blue) to determine the color.
+ */
+void ALVAR_EXPORT DrawPoints(cv::Mat& image,
+                             const std::vector<cv::Point>& contour,
+                             const cv::Scalar& color = CV_RGB(255, 0, 0));
+
+/** \brief Draws circles to the contour points that are obtained by \e Labeling
+ * class.
+ * \param image Pointer to the destination image.
+ * \param contour Contour sequence.
+ * \param radius Circle radius in pixels.
+ * \param color Use CV_RGB(red,green,blue) to determine the color.
+ */
+void ALVAR_EXPORT DrawCircles(cv::Mat& image,
+                              const std::vector<cv::Point>& contour, int radius,
+                              const cv::Scalar& color = CV_RGB(255, 0, 0));

 /** \brief Draws lines between consecutive contour points.
- * \param image Pointer to the destination image.
- * \param contour Controur sequence.
- * \param color Use CV_RGB(red,green,blue) to determine the color.
-*/
-void ALVAR_EXPORT DrawLines(IplImage* image, const CvSeq* contour, CvScalar color = CV_RGB(255,0,0));
+ * \param image Pointer to the destination image.
+ * \param contour Contour sequence.
+ * \param color Use CV_RGB(red,green,blue) to determine the color.
+ */
+void ALVAR_EXPORT DrawLines(cv::Mat& image,
+                            const std::vector<cv::Point>& contour,
+                            const cv::Scalar& color = CV_RGB(255, 0, 0));

 /** \brief Draws circles to the array of points.
- * \param image Pointer to the destination image.
- * \param points Vector of points to be visualized. - * \param color Use CV_RGB(red,green,blue) to determine the color. - * \param radius Circle radius in pixels. -*/ -template -void inline DrawPoints(IplImage *image, const std::vector& points, CvScalar color, int radius=1) + * \param image Pointer to the destination image. + * \param points Vector of points to be visualized. + * \param color Use CV_RGB(red,green,blue) to determine the color. + * \param radius Circle radius in pixels. + */ +template +void inline DrawPoints(cv::Mat& image, const std::vector& points, + const cv::Scalar& color, int radius = 1) { - for(unsigned i = 0; i < points.size(); ++i) - cvCircle(image, cvPoint((int)points[i].x,(int)points[i].y), radius, color); + for (unsigned i = 0; i < points.size(); ++i) + cv::circle(image, cv::Point((int)points[i].x, (int)points[i].y), radius, + color); } /** \brief Draws OpenCV ellipse. - * \param image Pointer to the destination image. - * \param ellipse Ellipse struct in OpenCV format. - * \param color Use CV_RGB(red,green,blue) to determine the color. - * \param fill If false, only the outline is drawn. - * \param par The ellipse width and height are grown by \e par pixels. -*/ -void ALVAR_EXPORT DrawCVEllipse(IplImage* image, CvBox2D& ellipse, CvScalar color, bool fill=false, double par=0); - -/** \brief This function is used to construct a texture image which is needed to hide a marker from the original video frame. See \e SampleMarkerHide.cpp for example implementation. - * \param image Pointer to the original video frame from where the hiding texture is calculated. - * \param hide_texture Pointer to the destination image where the resulting texture is stored. - * \param cam Camera object that is used for marker tracking. - * \param gl_modelview Current model view matrix. - * \param topleft Top left limit of the texture area in marker coordinate frame. - * \param botright Bottom right limit of the texture area in marker coordinate frame. + * \param image Pointer to the destination image. + * \param ellipse Ellipse struct in OpenCV format. + * \param color Use CV_RGB(red,green,blue) to determine the color. + * \param fill If false, only the outline is drawn. + * \param par The ellipse width and height are grown by \e par pixels. */ -void ALVAR_EXPORT BuildHideTexture(IplImage *image, IplImage *hide_texture, - Camera *cam, double gl_modelview[16], - PointDouble topleft, PointDouble botright); - -/** \brief Draws the texture generated by \e BuildHideTexture to given video frame. For better performance, use OpenGL instead. See \e SampleMarkerHide.cpp for example implementation. - * \param image Pointer to the destination image where the texture is drawn. - * \param texure Pointer to the texture image genereated by \e BuildHideTexture. - * \param cam Camera object that is used for marker tracking. - * \param gl_modelview Current model view matrix. - * \param topleft Top left limit of the texture area in marker coordinate frame. - * \param botright Bottom right limit of the texture area in marker coordinate frame. +void ALVAR_EXPORT DrawCVEllipse(cv::Mat& image, const cv::RotatedRect& ellipse, + const cv::Scalar& color, bool fill = false, + double par = 0); + +/** \brief This function is used to construct a texture image which is needed to + * hide a marker from the original video frame. See \e SampleMarkerHide.cpp for + * example implementation. \param image Pointer to the original video frame + * from where the hiding texture is calculated. 
\param hide_texture Pointer to + * the destination image where the resulting texture is stored. \param cam + * Camera object that is used for marker tracking. \param gl_modelview Current + * model view matrix. \param topleft Top left limit of the texture area in + * marker coordinate frame. \param botright Bottom right limit of the texture + * area in marker coordinate frame. + */ +void ALVAR_EXPORT BuildHideTexture(cv::Mat& image, cv::Mat& hide_texture, + Camera* cam, double gl_modelview[16], + PointDouble topleft, PointDouble botright); + +/** \brief Draws the texture generated by \e BuildHideTexture to given video + * frame. For better performance, use OpenGL instead. See \e + * SampleMarkerHide.cpp for example implementation. \param image Pointer to + * the destination image where the texture is drawn. \param texure Pointer to + * the texture image genereated by \e BuildHideTexture. \param cam Camera + * object that is used for marker tracking. \param gl_modelview Current model + * view matrix. \param topleft Top left limit of the texture area in marker + * coordinate frame. + * \param botright Bottom right limit of the texture area in marker + * coordinate frame. */ -void ALVAR_EXPORT DrawTexture(IplImage *image, IplImage *texture, - Camera *cam, double gl_modelview[16], - PointDouble topleft, PointDouble botright); +void ALVAR_EXPORT DrawTexture(cv::Mat& image, cv::Mat& texture, Camera* cam, + double gl_modelview[16], PointDouble topleft, + PointDouble botright); -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/EC.h b/ar_track_alvar/include/ar_track_alvar/EC.h index b2cb36c..3a6438b 100644 --- a/ar_track_alvar/include/ar_track_alvar/EC.h +++ b/ar_track_alvar/include/ar_track_alvar/EC.h @@ -36,30 +36,32 @@ #include "MarkerDetector.h" #include "MultiMarker.h" -namespace alvar { - +namespace alvar +{ /** \brief Basic structure to be usable with EC methods. * - * You can inherit your own classes/structures from this or make one with similar members. + * You can inherit your own classes/structures from this or make one with + * similar members. * - * The idea is that the EC-versions store the data externally in a map of features that - * are based on the structure \e ExternalContainer . The \e ExternalContainer has certain - * predefined fields that are used/needed by different methods. Below is a summary of - * some main methods for reading (r) and writing (w) in the external container. + * The idea is that the EC-versions store the data externally in a map of + * features that are based on the structure \e ExternalContainer . The \e + * ExternalContainer has certain predefined fields that are used/needed by + * different methods. Below is a summary of some main methods for reading (r) + * and writing (w) in the external container. * - * - * - * - * - * - * - * - * - * - * - * - * - * + *
+ * <table border=1>
+ * <tr><td></td><td>type_id</td><td>has_p2d</td><td>has_p3d</td><td>p2d</td><td>p3d</td><td>projected_p2d</td></tr>
+ * <tr><td>TrackerFeaturesEC::Track</td><td>(r)w</td><td>rw</td><td></td><td>rw</td><td></td><td></td></tr>
+ * <tr><td>CameraEC::Undistort</td><td>(r)</td><td></td><td></td><td>rw</td><td></td><td></td></tr>
+ * <tr><td>CameraEC::Distort</td><td>(r)</td><td></td><td></td><td>rw</td><td></td><td></td></tr>
+ * <tr><td>CameraEC::CalcExteriorOrientation</td><td>(r)</td><td></td><td></td><td>r</td><td>r</td><td></td></tr>
+ * <tr><td>CameraEC::UpdatePose</td><td>(r)</td><td></td><td></td><td>r</td><td>r</td><td></td></tr>
+ * <tr><td>CameraEC::Reproject</td><td>(r)</td><td>(r)</td><td>(r)</td><td></td><td>r</td><td>w</td></tr>
+ * <tr><td>CameraEC::EraseUsingReprojectionError</td><td>(r)</td><td>(r)</td><td>(r)</td><td>r</td><td></td><td>r</td></tr>
+ * <tr><td>MarkerDetectorEC::Detect</td><td>rw</td><td>w</td><td></td><td>w</td><td></td><td></td></tr>
+ * <tr><td>MarkerDetectorEC::MarkerToEC</td><td>w</td><td></td><td>w</td><td></td><td>w</td><td></td></tr>
+ * <tr><td>MultiMarkerEC::MarkersToEC</td><td>w</td><td></td><td>w</td><td></td><td>w</td><td></td></tr>
+ * </table>
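+ *
+ * A minimal usage sketch of the idea (illustrative only: \e frame is an
+ * assumed IplImage* input, and camera calibration and error handling are
+ * omitted):
+ * \code
+ * std::map<int, alvar::ExternalContainer> container;
+ * alvar::MarkerDetectorEC<alvar::MarkerData> detector;
+ * alvar::CameraEC cam;
+ * alvar::Pose pose;
+ * // Fill type_id, has_p2d and p2d for each detected marker corner (see the
+ * // table above).
+ * detector.Detect(frame, &cam, container);
+ * // Once p3d/has_p3d are also filled (e.g. via MarkerToEC), solve the pose.
+ * cam.CalcExteriorOrientation(container, &pose);
+ * \endcode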
+ * */ -class ExternalContainer { +class ExternalContainer +{ public: - int type_id; - bool has_p2d; - bool has_p3d; - CvPoint2D32f p2d; - CvPoint3D32f p3d; - CvPoint2D32f projected_p2d; // This is only temporary -- user needs to take care that it has valid content - ExternalContainer() : type_id(-1), has_p2d(false), has_p3d(false) {} - ExternalContainer(const ExternalContainer &c) { - type_id = c.type_id; - has_p2d = c.has_p2d; - has_p3d = c.has_p3d; - p2d.x = c.p2d.x; - p2d.y = c.p2d.y; - p3d.x = c.p3d.x; - p3d.y = c.p3d.y; - p3d.z = c.p3d.z; - projected_p2d.x = c.projected_p2d.x; - projected_p2d.y = c.projected_p2d.y; - } + int type_id; + bool has_p2d; + bool has_p3d; + CvPoint2D32f p2d; + CvPoint3D32f p3d; + CvPoint2D32f projected_p2d; // This is only temporary -- user needs to take + // care that it has valid content + ExternalContainer() : type_id(-1), has_p2d(false), has_p3d(false) + { + } + ExternalContainer(const ExternalContainer& c) + { + type_id = c.type_id; + has_p2d = c.has_p2d; + has_p3d = c.has_p3d; + p2d.x = c.p2d.x; + p2d.y = c.p2d.y; + p3d.x = c.p3d.x; + p3d.y = c.p3d.y; + p3d.z = c.p3d.z; + projected_p2d.x = c.projected_p2d.x; + projected_p2d.y = c.projected_p2d.y; + } }; -/** \brief This is a default functor for testing which items in the container should be handled by each method +/** \brief This is a default functor for testing which items in the container + * should be handled by each method * - * By default this only tests that the only the \e type_id. If you specify \e type_id that is not -1 - * then the functor tests that the item's \e type_id matches. Naturally you can make whatever tests - * in your own functors. + * By default this only tests that the only the \e type_id. If you specify \e + * type_id that is not -1 then the functor tests that the item's \e type_id + * matches. Naturally you can make whatever tests in your own functors. */ -template -class DoHandleTest { +template +class DoHandleTest +{ protected: - int type_id; - bool needs_has_p2d; - bool needs_has_p3d; + int type_id; + bool needs_has_p2d; + bool needs_has_p3d; + public: - DoHandleTest(int _type_id=-1, bool _needs_has_p2d=false, bool _needs_has_p3d=false) - : type_id(_type_id), needs_has_p2d(_needs_has_p2d), needs_has_p3d(_needs_has_p3d) {} - virtual bool operator()(const T &f) const { - if (needs_has_p2d && !f.has_p2d) return false; - if (needs_has_p3d && !f.has_p3d) return false; - // if (f.type_id == -1) return false; // Unnecessary? - if ((type_id != -1) && (type_id != f.type_id)) return false; - return true; - } + DoHandleTest(int _type_id = -1, bool _needs_has_p2d = false, + bool _needs_has_p3d = false) + : type_id(_type_id) + , needs_has_p2d(_needs_has_p2d) + , needs_has_p3d(_needs_has_p3d) + { + } + virtual bool operator()(const T& f) const + { + if (needs_has_p2d && !f.has_p2d) + return false; + if (needs_has_p3d && !f.has_p3d) + return false; + // if (f.type_id == -1) return false; // Unnecessary? + if ((type_id != -1) && (type_id != f.type_id)) + return false; + return true; + } }; -/** \brief This is default functor for testing which items in the container should be erased +/** \brief This is default functor for testing which items in the container + * should be erased * - * By default this can test for \e has_p2d and \e has_p3d . Furthermore, it can test for - * reprojection error when \e Pose , \e Camera and limit are given. + * By default this can test for \e has_p2d and \e has_p3d . 
Furthermore, it can + * test for reprojection error when \e Pose , \e Camera and limit are given. */ -template -class DoEraseTest { +template +class DoEraseTest +{ protected: - int type_id; - bool erase_without_p2d; - bool erase_without_p3d; - bool test_reprojection; - float limit_sq; + int type_id; + bool erase_without_p2d; + bool erase_without_p3d; + bool test_reprojection; + float limit_sq; + public: - DoEraseTest(bool _erase_without_p2d, bool _erase_without_p3d, int _type_id=-1) - : type_id(_type_id), erase_without_p2d(_erase_without_p2d), erase_without_p3d(_erase_without_p3d), - test_reprojection(false), limit_sq(0.f) {} - DoEraseTest(float _limit, bool _erase_without_p2d=false, bool _erase_without_p3d=false, int _type_id=-1) - : type_id(_type_id), erase_without_p2d(_erase_without_p2d), erase_without_p3d(_erase_without_p3d), - test_reprojection(true), limit_sq(_limit*_limit) {} - virtual bool operator()(const T &f) const { - if ((type_id != -1) && (type_id != f.type_id)) return false; - if (erase_without_p2d && !f.has_p2d) return true; - if (erase_without_p3d && !f.has_p3d) return true; - if (test_reprojection) { - if (!f.has_p2d) return false; - if (!f.has_p3d) return false; - // Note that the projected_p2d needs to have valid content at this point - double dist_sq = (f.p2d.x-f.projected_p2d.x)*(f.p2d.x-f.projected_p2d.x); - dist_sq += (f.p2d.y-f.projected_p2d.y)*(f.p2d.y-f.projected_p2d.y); - if (dist_sq > limit_sq) - return true; - } - return false; - } + DoEraseTest(bool _erase_without_p2d, bool _erase_without_p3d, + int _type_id = -1) + : type_id(_type_id) + , erase_without_p2d(_erase_without_p2d) + , erase_without_p3d(_erase_without_p3d) + , test_reprojection(false) + , limit_sq(0.f) + { + } + DoEraseTest(float _limit, bool _erase_without_p2d = false, + bool _erase_without_p3d = false, int _type_id = -1) + : type_id(_type_id) + , erase_without_p2d(_erase_without_p2d) + , erase_without_p3d(_erase_without_p3d) + , test_reprojection(true) + , limit_sq(_limit * _limit) + { + } + virtual bool operator()(const T& f) const + { + if ((type_id != -1) && (type_id != f.type_id)) + return false; + if (erase_without_p2d && !f.has_p2d) + return true; + if (erase_without_p3d && !f.has_p3d) + return true; + if (test_reprojection) + { + if (!f.has_p2d) + return false; + if (!f.has_p3d) + return false; + // Note that the projected_p2d needs to have valid content at this point + double dist_sq = + (f.p2d.x - f.projected_p2d.x) * (f.p2d.x - f.projected_p2d.x); + dist_sq += (f.p2d.y - f.projected_p2d.y) * (f.p2d.y - f.projected_p2d.y); + if (dist_sq > limit_sq) + return true; + } + return false; + } }; /** \brief Erasing items from container using \e DoEraseTest type functor */ -template -inline int EraseItemsEC(std::map &container, F do_erase_test) { - int count=0; - typename std::map::iterator iter = container.begin(); - while(iter != container.end()) { - T &f = iter->second; - if (do_erase_test(f)) { - container.erase(iter++); - count++; - } else iter++; - } - return count; +template +inline int EraseItemsEC(std::map& container, F do_erase_test) +{ + int count = 0; + typename std::map::iterator iter = container.begin(); + while (iter != container.end()) + { + T& f = iter->second; + if (do_erase_test(f)) + { + container.erase(iter++); + count++; + } + else + iter++; + } + return count; } /** \brief Version of \e TrackerFeatures using external container */ -class TrackerFeaturesEC : public TrackerFeatures { +class TrackerFeaturesEC : public TrackerFeatures +{ protected: - bool purge; + bool purge; 
+ public: - /** \brief Constructor */ - TrackerFeaturesEC(int _max_features=100, int _min_features=90, double _quality_level=0.01, double _min_distance=10, int _pyr_levels=4, int win_size=6) - : TrackerFeatures(_max_features, _min_features, _quality_level, _min_distance, _pyr_levels, win_size), purge(false) {} - - /** \brief Track features with matching type id. New features will have id's in the specified id range. */ - template - bool Track(IplImage *img, IplImage *mask, std::map &container, int type_id=-1, int first_id=-1, int last_id=-1) - { - DoHandleTest do_handle_test_default(type_id); - if (type_id == -1) type_id=0; - return Track(img, mask, container, do_handle_test_default, type_id, first_id, last_id); - } - - /** \brief Track features matching the given functor. New features will have id's in the specified id range. */ - /*template - bool Track(IplImage *img, IplImage *mask, std::map &container, F do_handle_test, int type_id=0, int first_id=0, int last_id=65535) - { - // Update features to match the ones in the given container - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - feature_count = 0; - for (;iter != iter_end; iter++) { - T &f = iter->second; - if (!do_handle_test(f)) continue; - if (f.has_p2d != true) continue; - f.has_p2d = false; // This is updated again to true if tracking succeeds - features[feature_count] = f.p2d; - ids[feature_count] = iter->first; - feature_count++; - if (feature_count == max_features) break; - } - // Check that next_id is ok - if (next_id < first_id) next_id = first_id; - if (next_id > last_id) return false; // TODO: Make some better solution for this - // Purge if needed - if (purge) { - TrackerFeatures::Purge(); - purge=false; - } - // Track as usual (this will swap above features to prev_features) - TrackHid(img, mask); - // Update the container to have the updated features - for (int i=0; i= last_id) break; - T &f = container[id]; - f.type_id = type_id; - f.has_p2d = true; - f.p2d = features[i]; - } - return true; - }*/ - - /** \brief Track features matching the given functor. If first_id >= 0 we call \e AddFeatures with the specified id range. */ - template - bool Track(IplImage *img, IplImage *mask, std::map &container, F do_handle_test, int type_id=0, int first_id=-1, int last_id=-1) - { - // When first_id && last_id are < 0 then we don't add new features... 
- if (first_id < 0) last_id = -1; - // Update features to match the ones in the given container - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - feature_count = 0; - for (;iter != iter_end; iter++) { - T &f = iter->second; - if (!do_handle_test(f)) continue; - if (f.has_p2d != true) continue; - f.has_p2d = false; // This is updated again to true if tracking succeeds - features[feature_count] = f.p2d; - ids[feature_count] = iter->first; - feature_count++; - if (feature_count == max_features) break; - } - // Purge if needed - if (purge) { - TrackerFeatures::Purge(); - purge=false; - } - if (first_id < 0) { - // Track as usual (this will swap above features to prev_features) - TrackHid(img, mask, false); - } else { - // Check that next_id is ok - if (next_id < first_id) next_id = first_id; - if (next_id > last_id) return false; // TODO: Make some better solution for this - // Track as usual (this will swap above features to prev_features) - TrackHid(img, mask, true); - } - // Update the container to have the updated features - for (int i=0; i= 0 && id >= last_id) break; - T &f = container[id]; - f.type_id = type_id; - f.has_p2d = true; - f.p2d = features[i]; - } - return true; - } - - /** \brief add features to the previously tracked frame if there are less than min_features */ - template - bool AddFeatures(std::map &container, int type_id=0, int first_id=0, int last_id=65535) - { - if (TrackerFeatures::AddFeatures() == 0) return false; - // Update the container to have the updated features - for (int i=0; i= 0 && id >= last_id) break; - T &f = container[id]; - f.type_id = type_id; - f.has_p2d = true; - f.p2d = features[i]; - } - return true; - } - - /** \brief Erases the items matching with \e type_id having \e has_p2d == false . If \e type_id == -1 doesn't test the type. */ - template - int EraseNonTracked(std::map &container, int type_id=-1) - { - DoEraseTest do_erase_test(true, false, type_id); - return EraseItemsEC(container, do_erase_test); - } - /** \brief Purge features that are considerably closer than the defined min_distance. - * - * Note, that we always try to maintain the smaller id's assuming that they are older ones - */ - void Purge() { purge=true; } - void Reset() { throw alvar::AlvarException("Method not supported"); } - double Reset(IplImage *img, IplImage *mask) { throw alvar::AlvarException("Method not supported"); } - bool DelFeature(int index) { throw alvar::AlvarException("Method not supported"); } - bool DelFeatureId(int id) { throw alvar::AlvarException("Method not supported"); } + /** \brief Constructor */ + TrackerFeaturesEC(int _max_features = 100, int _min_features = 90, + double _quality_level = 0.01, double _min_distance = 10, + int _pyr_levels = 4, int win_size = 6) + : TrackerFeatures(_max_features, _min_features, _quality_level, + _min_distance, _pyr_levels, win_size) + , purge(false) + { + } + + /** \brief Track features with matching type id. New features will have id's + * in the specified id range. */ + template + bool Track(IplImage* img, IplImage* mask, std::map& container, + int type_id = -1, int first_id = -1, int last_id = -1) + { + DoHandleTest do_handle_test_default(type_id); + if (type_id == -1) + type_id = 0; + return Track(img, mask, container, do_handle_test_default, type_id, + first_id, last_id); + } + + /** \brief Track features matching the given functor. New features will have + * id's in the specified id range. 
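+ *
+ * A hedged sketch of a functor-based call (the functor choice and id range
+ * are illustrative; \e frame is an assumed IplImage* input):
+ * \code
+ * TrackerFeaturesEC tracker;
+ * std::map<int, alvar::ExternalContainer> container;
+ * DoHandleTest<alvar::ExternalContainer> handle_type_0(0);
+ * tracker.Track(frame, NULL, container, handle_type_0, 0, 1000, 1999);
+ * \endcode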
*/ + /*template + bool Track(IplImage *img, IplImage *mask, std::map &container, F + do_handle_test, int type_id=0, int first_id=0, int last_id=65535) + { + // Update features to match the ones in the given container + typename std::map::iterator iter = container.begin(); + typename std::map::iterator iter_end = container.end(); + feature_count = 0; + for (;iter != iter_end; iter++) { + T &f = iter->second; + if (!do_handle_test(f)) continue; + if (f.has_p2d != true) continue; + f.has_p2d = false; // This is updated again to true if tracking succeeds + features[feature_count] = f.p2d; + ids[feature_count] = iter->first; + feature_count++; + if (feature_count == max_features) break; + } + // Check that next_id is ok + if (next_id < first_id) next_id = first_id; + if (next_id > last_id) return false; // TODO: Make some better solution for + this + // Purge if needed + if (purge) { + TrackerFeatures::Purge(); + purge=false; + } + // Track as usual (this will swap above features to prev_features) + TrackHid(img, mask); + // Update the container to have the updated features + for (int i=0; i= last_id) break; + T &f = container[id]; + f.type_id = type_id; + f.has_p2d = true; + f.p2d = features[i]; + } + return true; + }*/ + + /** \brief Track features matching the given functor. If first_id >= 0 we call + * \e AddFeatures with the specified id range. */ + template + bool Track(IplImage* img, IplImage* mask, std::map& container, + F do_handle_test, int type_id = 0, int first_id = -1, + int last_id = -1) + { + // When first_id && last_id are < 0 then we don't add new features... + if (first_id < 0) + last_id = -1; + // Update features to match the ones in the given container + typename std::map::iterator iter = container.begin(); + typename std::map::iterator iter_end = container.end(); + feature_count = 0; + for (; iter != iter_end; iter++) + { + T& f = iter->second; + if (!do_handle_test(f)) + continue; + if (f.has_p2d != true) + continue; + f.has_p2d = false; // This is updated again to true if tracking succeeds + features[feature_count] = f.p2d; + ids[feature_count] = iter->first; + feature_count++; + if (feature_count == max_features) + break; + } + // Purge if needed + if (purge) + { + TrackerFeatures::Purge(); + purge = false; + } + if (first_id < 0) + { + // Track as usual (this will swap above features to prev_features) + TrackHid(img, mask, false); + } + else + { + // Check that next_id is ok + if (next_id < first_id) + next_id = first_id; + if (next_id > last_id) + return false; // TODO: Make some better solution for this + // Track as usual (this will swap above features to prev_features) + TrackHid(img, mask, true); + } + // Update the container to have the updated features + for (int i = 0; i < feature_count; i++) + { + int id = ids[i]; + if (last_id >= 0 && id >= last_id) + break; + T& f = container[id]; + f.type_id = type_id; + f.has_p2d = true; + f.p2d = features[i]; + } + return true; + } + + /** \brief add features to the previously tracked frame if there are less than + * min_features */ + template + bool AddFeatures(std::map& container, int type_id = 0, + int first_id = 0, int last_id = 65535) + { + if (TrackerFeatures::AddFeatures() == 0) + return false; + // Update the container to have the updated features + for (int i = 0; i < feature_count; i++) + { + int id = ids[i]; + if (last_id >= 0 && id >= last_id) + break; + T& f = container[id]; + f.type_id = type_id; + f.has_p2d = true; + f.p2d = features[i]; + } + return true; + } + + /** \brief Erases the items matching with \e 
type_id having \e has_p2d == + * false . If \e type_id == -1 doesn't test the type. */ + template + int EraseNonTracked(std::map& container, int type_id = -1) + { + DoEraseTest do_erase_test(true, false, type_id); + return EraseItemsEC(container, do_erase_test); + } + /** \brief Purge features that are considerably closer than the defined + * min_distance. + * + * Note, that we always try to maintain the smaller id's assuming that they + * are older ones + */ + void Purge() + { + purge = true; + } + void Reset() + { + throw alvar::AlvarException("Method not supported"); + } + double Reset(IplImage* img, IplImage* mask) + { + throw alvar::AlvarException("Method not supported"); + } + bool DelFeature(int index) + { + throw alvar::AlvarException("Method not supported"); + } + bool DelFeatureId(int id) + { + throw alvar::AlvarException("Method not supported"); + } }; /** \brief Version of \e Camera using external container */ -class ALVAR_EXPORT CameraEC : public Camera { +class ALVAR_EXPORT CameraEC : public Camera +{ public: - /** \brief Undistort the items with matching \e type_id */ - template - void Undistort(std::map &container, int type_id=-1) { - DoHandleTest do_handle_test_default(type_id); - return Undistort(container, do_handle_test_default); - } - /** \brief Undistort the items matching the given functor */ - template - void Undistort(std::map &container, F &do_handle_test) { - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - for (;iter != iter_end; iter++) { - T &f = iter->second; - if (!do_handle(f)) continue; - if (f.has_p2d) Undistort(f.p2d); - } - } - /** \brief Distort the items with matching \e type_id */ - template - void Distort(std::map &container, int type_id=-1) { - DoHandleTest do_handle_test_default(type_id); - return Distort(container, do_handle_test_default); - } - /** \brief Distort the items matching the given functor */ - template - void Distort(std::map &container, F &do_handle_test) { - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - for (;iter != iter_end; iter++) { - T &f = iter->second; - if (!do_handle(f)) continue; - if (f.has_p2d) Distort(f.p2d); - } - } - /** \brief Calculate the \e pose using the items with matching \e type_id */ - template - bool CalcExteriorOrientation(std::map &container, Pose *pose, int type_id=-1) { - DoHandleTest do_handle_test_default(type_id); - return CalcExteriorOrientation(container, pose, do_handle_test_default); - } - /** \brief Calculate the \e pose using the items matching the given functor */ - template - bool CalcExteriorOrientation(std::map &container, Pose *pose, F do_handle_test) { - int count_points = container.size(); - if(count_points < 4) return false; - CvMat* world_points = cvCreateMat(count_points, 1, CV_32FC3); - CvMat* image_observations = cvCreateMat(count_points*2, 1, CV_32FC2); - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - int ind = 0; - for (;iter != iter_end; iter++) { - T &f = iter->second; - if (!do_handle_test(f)) continue; - if (f.has_p2d && f.has_p3d) { - world_points->data.fl[ind*3+0] = (float)f.p3d.x; - world_points->data.fl[ind*3+1] = (float)f.p3d.y; - world_points->data.fl[ind*3+2] = (float)f.p3d.z; - image_observations->data.fl[ind*2+0] = (float)f.p2d.x; - image_observations->data.fl[ind*2+1] = (float)f.p2d.y; - ind++; - } - } - if (ind<4) return false; - - CvRect r; r.x = 0; r.y = 0; r.height = 
ind; r.width = 1; - CvMat world_points_sub; - cvGetSubRect(world_points, &world_points_sub, r); - CvMat image_observations_sub; - cvGetSubRect(image_observations, &image_observations_sub, r); - - bool ret = Camera::CalcExteriorOrientation(&world_points_sub, &image_observations_sub, pose); - - cvReleaseMat(&image_observations); - cvReleaseMat(&world_points); - - return ret; - } - /** \brief Update the \e pose using the items with matching \e type_id */ - template - bool UpdatePose(std::map &container, Pose *pose, int type_id=-1, std::map *weights=0) { - DoHandleTest do_handle_test_default(type_id); - return UpdatePose(container, pose, do_handle_test_default, weights); - } - /** \brief Update the rotation in \e pose using the items with matching \e type_id */ - template - bool UpdateRotation(std::map &container, Pose *pose, int type_id=-1) { - DoHandleTest do_handle_test_default(type_id); - return UpdateRotation(container, pose, do_handle_test_default); - } - /** \brief Update the rotation in \e pose using the items matching the given functor */ - template - bool UpdateRotation(std::map &container, Pose *pose, F do_handle_test) { - int count_points = container.size(); - if(count_points < 6) return false; - - CvMat* world_points = cvCreateMat(count_points, 1, CV_64FC3); - CvMat* image_observations = cvCreateMat(count_points*2, 1, CV_64F); // [u v u v u v ...]' - - int ind = 0; - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - for (;iter != iter_end; iter++) { - T &f = iter->second; - if (!do_handle_test(f)) continue; - if (f.has_p2d && f.has_p3d) { - world_points->data.db[ind*3+0] = f.p3d.x; - world_points->data.db[ind*3+1] = f.p3d.y; - world_points->data.db[ind*3+2] = f.p3d.z; - image_observations->data.db[ind*2+0] = f.p2d.x; - image_observations->data.db[ind*2+1] = f.p2d.y; - ind++; - } - } - if (ind < 6) return false; - - CvRect r; r.x = 0; r.y = 0; r.height = ind; r.width = 1; - CvMat world_points_sub; - cvGetSubRect(world_points, &world_points_sub, r); - - r.height = 2*ind; - CvMat image_observations_sub; - cvGetSubRect(image_observations, &image_observations_sub, r); - - bool ret = UpdateRotation(&world_points_sub, &image_observations_sub, pose); - - cvReleaseMat(&image_observations); - cvReleaseMat(&world_points); - - return ret; - } - /** \brief Update the \e pose using the items matching the given functor */ - template - bool UpdatePose(std::map &container, Pose *pose, F do_handle_test, std::map *weights=0) { - int count_points = container.size(); - if(count_points < 4) return false; // Note, UpdatePose calls CalcExteriorOrientation if 4 or 5 points - - CvMat* world_points = cvCreateMat(count_points, 1, CV_64FC3); - CvMat* image_observations = cvCreateMat(count_points*2, 1, CV_64F); // [u v u v u v ...]' - CvMat* w = 0; - if(weights) w = cvCreateMat(count_points*2, 1, CV_64F); - - int ind = 0; - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - for (;iter != iter_end; iter++) { - T &f = iter->second; - if (!do_handle_test(f)) continue; - if (f.has_p2d && f.has_p3d) { - world_points->data.db[ind*3+0] = f.p3d.x; - world_points->data.db[ind*3+1] = f.p3d.y; - world_points->data.db[ind*3+2] = f.p3d.z; - image_observations->data.db[ind*2+0] = f.p2d.x; - image_observations->data.db[ind*2+1] = f.p2d.y; - - if(weights) - w->data.db[ind*2+1] = w->data.db[ind*2+0] = (*weights)[iter->first]; - ind++; - } - } - if (ind < 4) return false; // Note, UpdatePose calls 
CalcExteriorOrientation if 4 or 5 points - - CvRect r; r.x = 0; r.y = 0; r.height = ind; r.width = 1; - CvMat world_points_sub; - cvGetSubRect(world_points, &world_points_sub, r); - - r.height = 2*ind; - CvMat image_observations_sub; - cvGetSubRect(image_observations, &image_observations_sub, r); - - bool ret; - if (weights) { - CvMat w_sub; - cvGetSubRect(w, &w_sub, r); - ret = UpdatePose(&world_points_sub, &image_observations_sub, pose, &w_sub); - } - else - ret = UpdatePose(&world_points_sub, &image_observations_sub, pose); - - cvReleaseMat(&image_observations); - cvReleaseMat(&world_points); - if(w) cvReleaseMat(&w); - - return ret; - } - - /** \brief Projects \e p3d in the items matching with \e type_id into \e projected_p2d . If \e type_id == -1 doesn't test the type. */ - template - float Reproject(std::map &container, Pose *pose, int type_id=-1, bool needs_has_p2d=false, bool needs_has_p3d=false, float average_outlier_limit=0.f) - { - DoHandleTest do_handle_test(type_id, needs_has_p2d, needs_has_p3d); - return Reproject(container, pose, do_handle_test, average_outlier_limit); - } - /** \brief Projects \e p3d in the items matching with the given functor */ - template - float Reproject(std::map &container, Pose *pose, F do_handle_test, float average_outlier_limit=0.f) - { - float reprojection_sum=0.f; - int reprojection_count=0; - float limit_sq = average_outlier_limit*average_outlier_limit; - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - for(;iter != iter_end;iter++) { - T &f = iter->second; - if (!do_handle_test(f)) continue; - Camera::ProjectPoint(iter->second.p3d, pose, iter->second.projected_p2d); - Camera::ProjectPoint(iter->second.p3d_sh, pose, iter->second.projected_p2d_sh); - - // TODO: Now this is calculated in several places (should this distance be included in ExternalContainer structure? - float dist_sq = (f.p2d.x-f.projected_p2d.x)*(f.p2d.x-f.projected_p2d.x); - dist_sq += (f.p2d.y-f.projected_p2d.y)*(f.p2d.y-f.projected_p2d.y); - if ((limit_sq == 0) || (dist_sq < limit_sq)) { - reprojection_sum += sqrt(dist_sq); - reprojection_count++; - } - } - if (reprojection_count == 0) return 0.f; - return reprojection_sum/reprojection_count; - } - - /** \brief Erases the items matching with \e type_id having large reprojection error. - * If \e type_id == -1 doesn't test the type. - * If \e pose is given calls \e Reproject() internally -- otherwise assumes it has been called beforehands. 
- */
- template <class T>
- int EraseUsingReprojectionError(std::map<int, T> &container, float reprojection_error_limit, int type_id=-1, Pose *pose = 0)
- {
-   if (pose) Reproject(container, pose, type_id, false, false);
-   DoEraseTest<T> do_erase_test(reprojection_error_limit, false, false, type_id);
-   return EraseItemsEC(container, do_erase_test);
- }
-
- /** \brief Update pose rotations based on new observations */
- bool UpdateRotation(const CvMat* object_points, CvMat* image_points, Pose *pose);
-
- /** \brief Update pose rotations (in rodriques&tra) based on new observations */
- bool UpdateRotation(const CvMat* object_points, CvMat* image_points, CvMat *rot, CvMat *tra);
-
- /** \brief Update existing pose based on new observations */
- bool UpdatePose(const CvMat* object_points, CvMat* image_points, Pose *pose, CvMat *weights=0);
-
- /** \brief Update existing pose (in rodriques&tra) based on new observations */
- bool UpdatePose(const CvMat* object_points, CvMat* image_points, CvMat *rodriques, CvMat *tra, CvMat *weights=0);
-
- /** \brief Reconstruct 3D point using two camera poses and corresponding undistorted image feature positions */
- bool ReconstructFeature(const Pose *pose1, const Pose *pose2, const CvPoint2D32f *u1, const CvPoint2D32f *u2, CvPoint3D32f *p3d, double limit);
-
- /** \brief Get 3D-coordinate for 2D feature on the plane defined by the pose (z == 0) */
- void Get3dOnPlane(const Pose *pose, CvPoint2D32f p2d, CvPoint3D32f &p3d);
-
- /** \brief Get 3D-coordinate for 2D feature assuming certain depth */
- void Get3dOnDepth(const Pose *pose, CvPoint2D32f p2d, float depth, CvPoint3D32f &p3d);
+  /** \brief Undistort the items with matching \e type_id */
+  template <class T>
+  void Undistort(std::map<int, T>& container, int type_id = -1)
+  {
+    DoHandleTest<T> do_handle_test_default(type_id);
+    return Undistort(container, do_handle_test_default);
+  }
+  /** \brief Undistort the items matching the given functor */
+  template <class T, class F>
+  void Undistort(std::map<int, T>& container, F& do_handle_test)
+  {
+    typename std::map<int, T>::iterator iter = container.begin();
+    typename std::map<int, T>::iterator iter_end = container.end();
+    for (; iter != iter_end; iter++)
+    {
+      T& f = iter->second;
+      if (!do_handle_test(f))
+        continue;
+      if (f.has_p2d)
+        Undistort(f.p2d);
+    }
+  }
+  /** \brief Distort the items with matching \e type_id */
+  template <class T>
+  void Distort(std::map<int, T>& container, int type_id = -1)
+  {
+    DoHandleTest<T> do_handle_test_default(type_id);
+    return Distort(container, do_handle_test_default);
+  }
+  /** \brief Distort the items matching the given functor */
+  template <class T, class F>
+  void Distort(std::map<int, T>& container, F& do_handle_test)
+  {
+    typename std::map<int, T>::iterator iter = container.begin();
+    typename std::map<int, T>::iterator iter_end = container.end();
+    for (; iter != iter_end; iter++)
+    {
+      T& f = iter->second;
+      if (!do_handle_test(f))
+        continue;
+      if (f.has_p2d)
+        Distort(f.p2d);
+    }
+  }
+  /** \brief Calculate the \e pose using the items with matching \e type_id */
+  template <class T>
+  bool CalcExteriorOrientation(std::map<int, T>& container, Pose* pose,
+                               int type_id = -1)
+  {
+    DoHandleTest<T> do_handle_test_default(type_id);
+    return CalcExteriorOrientation(container, pose, do_handle_test_default);
+  }
+  /** \brief Calculate the \e pose using the items matching the given functor */
+  template <class T, class F>
+  bool CalcExteriorOrientation(std::map<int, T>& container, Pose* pose,
+                               F do_handle_test)
+  {
+    int count_points = container.size();
+    if (count_points < 4)
+      return false;
+    CvMat* world_points = cvCreateMat(count_points, 1, CV_32FC3);
+    CvMat* image_observations = cvCreateMat(count_points * 2,
1, CV_32FC2); + typename std::map::iterator iter = container.begin(); + typename std::map::iterator iter_end = container.end(); + int ind = 0; + for (; iter != iter_end; iter++) + { + T& f = iter->second; + if (!do_handle_test(f)) + continue; + if (f.has_p2d && f.has_p3d) + { + world_points->data.fl[ind * 3 + 0] = (float)f.p3d.x; + world_points->data.fl[ind * 3 + 1] = (float)f.p3d.y; + world_points->data.fl[ind * 3 + 2] = (float)f.p3d.z; + image_observations->data.fl[ind * 2 + 0] = (float)f.p2d.x; + image_observations->data.fl[ind * 2 + 1] = (float)f.p2d.y; + ind++; + } + } + if (ind < 4) + return false; + + CvRect r; + r.x = 0; + r.y = 0; + r.height = ind; + r.width = 1; + CvMat world_points_sub; + cvGetSubRect(world_points, &world_points_sub, r); + CvMat image_observations_sub; + cvGetSubRect(image_observations, &image_observations_sub, r); + + bool ret = Camera::CalcExteriorOrientation(&world_points_sub, + &image_observations_sub, pose); + + cvReleaseMat(&image_observations); + cvReleaseMat(&world_points); + + return ret; + } + /** \brief Update the \e pose using the items with matching \e type_id */ + template + bool UpdatePose(std::map& container, Pose* pose, int type_id = -1, + std::map* weights = 0) + { + DoHandleTest do_handle_test_default(type_id); + return UpdatePose(container, pose, do_handle_test_default, weights); + } + /** \brief Update the rotation in \e pose using the items with matching \e + * type_id */ + template + bool UpdateRotation(std::map& container, Pose* pose, int type_id = -1) + { + DoHandleTest do_handle_test_default(type_id); + return UpdateRotation(container, pose, do_handle_test_default); + } + /** \brief Update the rotation in \e pose using the items matching the given + * functor */ + template + bool UpdateRotation(std::map& container, Pose* pose, F do_handle_test) + { + int count_points = container.size(); + if (count_points < 6) + return false; + + CvMat* world_points = cvCreateMat(count_points, 1, CV_64FC3); + CvMat* image_observations = + cvCreateMat(count_points * 2, 1, CV_64F); // [u v u v u v ...]' + + int ind = 0; + typename std::map::iterator iter = container.begin(); + typename std::map::iterator iter_end = container.end(); + for (; iter != iter_end; iter++) + { + T& f = iter->second; + if (!do_handle_test(f)) + continue; + if (f.has_p2d && f.has_p3d) + { + world_points->data.db[ind * 3 + 0] = f.p3d.x; + world_points->data.db[ind * 3 + 1] = f.p3d.y; + world_points->data.db[ind * 3 + 2] = f.p3d.z; + image_observations->data.db[ind * 2 + 0] = f.p2d.x; + image_observations->data.db[ind * 2 + 1] = f.p2d.y; + ind++; + } + } + if (ind < 6) + return false; + + CvRect r; + r.x = 0; + r.y = 0; + r.height = ind; + r.width = 1; + CvMat world_points_sub; + cvGetSubRect(world_points, &world_points_sub, r); + + r.height = 2 * ind; + CvMat image_observations_sub; + cvGetSubRect(image_observations, &image_observations_sub, r); + + bool ret = UpdateRotation(&world_points_sub, &image_observations_sub, pose); + + cvReleaseMat(&image_observations); + cvReleaseMat(&world_points); + + return ret; + } + /** \brief Update the \e pose using the items matching the given functor */ + template + bool UpdatePose(std::map& container, Pose* pose, F do_handle_test, + std::map* weights = 0) + { + int count_points = container.size(); + if (count_points < 4) + return false; // Note, UpdatePose calls CalcExteriorOrientation if 4 or 5 + // points + + CvMat* world_points = cvCreateMat(count_points, 1, CV_64FC3); + CvMat* image_observations = + cvCreateMat(count_points * 2, 1, CV_64F); 
// [u v u v u v ...]' + CvMat* w = 0; + if (weights) + w = cvCreateMat(count_points * 2, 1, CV_64F); + + int ind = 0; + typename std::map::iterator iter = container.begin(); + typename std::map::iterator iter_end = container.end(); + for (; iter != iter_end; iter++) + { + T& f = iter->second; + if (!do_handle_test(f)) + continue; + if (f.has_p2d && f.has_p3d) + { + world_points->data.db[ind * 3 + 0] = f.p3d.x; + world_points->data.db[ind * 3 + 1] = f.p3d.y; + world_points->data.db[ind * 3 + 2] = f.p3d.z; + image_observations->data.db[ind * 2 + 0] = f.p2d.x; + image_observations->data.db[ind * 2 + 1] = f.p2d.y; + + if (weights) + w->data.db[ind * 2 + 1] = w->data.db[ind * 2 + 0] = + (*weights)[iter->first]; + ind++; + } + } + if (ind < 4) + return false; // Note, UpdatePose calls CalcExteriorOrientation if 4 or 5 + // points + + CvRect r; + r.x = 0; + r.y = 0; + r.height = ind; + r.width = 1; + CvMat world_points_sub; + cvGetSubRect(world_points, &world_points_sub, r); + + r.height = 2 * ind; + CvMat image_observations_sub; + cvGetSubRect(image_observations, &image_observations_sub, r); + + bool ret; + if (weights) + { + CvMat w_sub; + cvGetSubRect(w, &w_sub, r); + ret = + UpdatePose(&world_points_sub, &image_observations_sub, pose, &w_sub); + } + else + ret = UpdatePose(&world_points_sub, &image_observations_sub, pose); + + cvReleaseMat(&image_observations); + cvReleaseMat(&world_points); + if (w) + cvReleaseMat(&w); + + return ret; + } + + /** \brief Projects \e p3d in the items matching with \e type_id into \e + * projected_p2d . If \e type_id == -1 doesn't test the type. */ + template + float Reproject(std::map& container, Pose* pose, int type_id = -1, + bool needs_has_p2d = false, bool needs_has_p3d = false, + float average_outlier_limit = 0.f) + { + DoHandleTest do_handle_test(type_id, needs_has_p2d, needs_has_p3d); + return Reproject(container, pose, do_handle_test, average_outlier_limit); + } + /** \brief Projects \e p3d in the items matching with the given functor */ + template + float Reproject(std::map& container, Pose* pose, F do_handle_test, + float average_outlier_limit = 0.f) + { + float reprojection_sum = 0.f; + int reprojection_count = 0; + float limit_sq = average_outlier_limit * average_outlier_limit; + typename std::map::iterator iter = container.begin(); + typename std::map::iterator iter_end = container.end(); + for (; iter != iter_end; iter++) + { + T& f = iter->second; + if (!do_handle_test(f)) + continue; + Camera::ProjectPoint(iter->second.p3d, pose, iter->second.projected_p2d); + Camera::ProjectPoint(iter->second.p3d_sh, pose, + iter->second.projected_p2d_sh); + + // TODO: Now this is calculated in several places (should this distance be + // included in ExternalContainer structure? + float dist_sq = + (f.p2d.x - f.projected_p2d.x) * (f.p2d.x - f.projected_p2d.x); + dist_sq += (f.p2d.y - f.projected_p2d.y) * (f.p2d.y - f.projected_p2d.y); + if ((limit_sq == 0) || (dist_sq < limit_sq)) + { + reprojection_sum += sqrt(dist_sq); + reprojection_count++; + } + } + if (reprojection_count == 0) + return 0.f; + return reprojection_sum / reprojection_count; + } + + /** \brief Erases the items matching with \e type_id having large reprojection + * error. If \e type_id == -1 doesn't test the type. If \e pose is given calls + * \e Reproject() internally -- otherwise assumes it has been called + * beforehands. 
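+ *
+ * A short illustrative sequence (assumes \e cam is a calibrated CameraEC and
+ * \e container already holds features with valid p2d/p3d):
+ * \code
+ * alvar::Pose pose;
+ * cam.UpdatePose(container, &pose);
+ * // Erase features whose reprojection error exceeds 2 pixels; passing the
+ * // pose makes Reproject() run internally first.
+ * cam.EraseUsingReprojectionError(container, 2.0f, -1, &pose);
+ * \endcode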
+ */ + template + int EraseUsingReprojectionError(std::map& container, + float reprojection_error_limit, + int type_id = -1, Pose* pose = 0) + { + if (pose) + Reproject(container, pose, type_id, false, false); + DoEraseTest do_erase_test(reprojection_error_limit, false, false, + type_id); + return EraseItemsEC(container, do_erase_test); + } + + /** \brief Update pose rotations based on new observations */ + bool UpdateRotation(const CvMat* object_points, CvMat* image_points, + Pose* pose); + + /** \brief Update pose rotations (in rodriques&tra) based on new observations + */ + bool UpdateRotation(const CvMat* object_points, CvMat* image_points, + CvMat* rot, CvMat* tra); + + /** \brief Update existing pose based on new observations */ + bool UpdatePose(const CvMat* object_points, CvMat* image_points, Pose* pose, + CvMat* weights = 0); + + /** \brief Update existing pose (in rodriques&tra) based on new observations + */ + bool UpdatePose(const CvMat* object_points, CvMat* image_points, + CvMat* rodriques, CvMat* tra, CvMat* weights = 0); + + /** \brief Reconstruct 3D point using two camera poses and corresponding + * undistorted image feature positions */ + bool ReconstructFeature(const Pose* pose1, const Pose* pose2, + const CvPoint2D32f* u1, const CvPoint2D32f* u2, + CvPoint3D32f* p3d, double limit); + + /** \brief Get 3D-coordinate for 2D feature on the plane defined by the pose + * (z == 0) */ + void Get3dOnPlane(const Pose* pose, CvPoint2D32f p2d, CvPoint3D32f& p3d); + + /** \brief Get 3D-coordinate for 2D feature assuming certain depth */ + void Get3dOnDepth(const Pose* pose, CvPoint2D32f p2d, float depth, + CvPoint3D32f& p3d); }; -/** \brief Calculate the index used in external container map for specified \e marker_id */ -inline int MarkerIdToContainerId(int marker_id, int corner_id, int first_id=0, int last_id=65535) { - int id = first_id + marker_id*4 + corner_id; - if (id > last_id) return -1; - return id; +/** \brief Calculate the index used in external container map for specified \e + * marker_id */ +inline int MarkerIdToContainerId(int marker_id, int corner_id, int first_id = 0, + int last_id = 65535) +{ + int id = first_id + marker_id * 4 + corner_id; + if (id > last_id) + return -1; + return id; } /** \brief Version of \e MarkerDetector using external container */ -template -class MarkerDetectorEC : public MarkerDetector { +template +class MarkerDetectorEC : public MarkerDetector +{ protected: - template - bool PreDetect(std::pair &p, int type_id) { - return PreDetect(p.second, type_id); - } - template - bool PreDetect(T &p, int type_id) { - if (p.type_id != type_id) return false; - p.has_p2d = false; // This is updated again to true if tracking succeeds - return true; - } + template + bool PreDetect(std::pair& p, int type_id) + { + return PreDetect(p.second, type_id); + } + template + bool PreDetect(T& p, int type_id) + { + if (p.type_id != type_id) + return false; + p.has_p2d = false; // This is updated again to true if tracking succeeds + return true; + } + public: - int GetId(int marker_id, int corner_id, int first_id=0, int last_id=65535) { - return MarkerIdToContainerId(marker_id, corner_id, first_id, last_id); - } - - /** \brief Detect markers in the image and fill in their 2D-positions in the given external container */ - template - int Detect(IplImage *image, - Camera *cam, - std::map &container, - int type_id=0, - int first_id=0, - int last_id=65535, - bool track=false, - bool visualize=false, - double max_new_marker_error=0.08, - double max_track_error=0.2, - 
LabelingMethod labeling_method=CVSEQ) - { - int ret; - - // The existing markers in the container are marked to not have valid p2d (has_p2d=false) - typename std::map::iterator iter = container.begin(); - typename std::map::iterator iter_end = container.end(); - for (;iter != iter_end; iter++) { - T &f = iter->second; - if (f.type_id != type_id) continue; - f.has_p2d = false; // This is updated again to true if tracking succeeds - } - - // Detect without making the unnecessary pose update - ret = MarkerDetector::Detect(image, cam, track, visualize, max_new_marker_error, max_track_error, labeling_method, false); - - // Fill in the detected markers to the container - for (size_t i=0; i::markers->size(); i++) { - M &m = (*(MarkerDetector::markers))[i]; - for (int corner=0; corner<4; corner++) { - int id = MarkerIdToContainerId(m.GetId(), corner, first_id, last_id); - if (id != -1) { - T &f = container[id]; - f.type_id = type_id; - f.has_p2d = true; - f.p2d.x = float(m.marker_corners_img[corner].x); - f.p2d.y = float(m.marker_corners_img[corner].y); - } - } - } - return ret; - } - - /** \brief Fill in 3D-coordinates for \e marker_id marker corners using \e edge_length and \e pose */ - template - void MarkerToEC(std::map &container, int marker_id, double edge_length, Pose &pose, int type_id=0, int first_id=0,int last_id=65535) { - CvMat *m3 = cvCreateMat(4,4,CV_64F); cvSetIdentity(m3); - pose.GetMatrix(m3); - - for(size_t corner = 0; corner < 4; ++corner) - { - // TODO: This should be exactly the same as in Marker class. - // Should we get the values from there somehow? - double X_data[4] = {0, 0, 0, 1}; - if (corner == 0) { - X_data[0] = -0.5*edge_length; - X_data[1] = -0.5*edge_length; - } else if (corner == 1) { - X_data[0] = +0.5*edge_length; - X_data[1] = -0.5*edge_length; - } else if (corner == 2) { - X_data[0] = +0.5*edge_length; - X_data[1] = +0.5*edge_length; - } else if (corner == 3) { - X_data[0] = -0.5*edge_length; - X_data[1] = +0.5*edge_length; - } - - CvMat X = cvMat(4, 1, CV_64F, X_data); - cvMatMul(m3, &X, &X); - - int id = MarkerIdToContainerId(marker_id, corner, first_id, last_id); - T &f = container[id]; - f.type_id = type_id; - f.p3d.x = float(X_data[0] / X_data[3]); - f.p3d.y = float(X_data[1] / X_data[3]); - f.p3d.z = float(X_data[2] / X_data[3]); - f.has_p3d = true; - } - cvReleaseMat(&m3); - } + int GetId(int marker_id, int corner_id, int first_id = 0, int last_id = 65535) + { + return MarkerIdToContainerId(marker_id, corner_id, first_id, last_id); + } + + /** \brief Detect markers in the image and fill in their 2D-positions in the + * given external container */ + template + int Detect(IplImage* image, Camera* cam, std::map& container, + int type_id = 0, int first_id = 0, int last_id = 65535, + bool track = false, bool visualize = false, + double max_new_marker_error = 0.08, double max_track_error = 0.2, + LabelingMethod labeling_method = CVSEQ) + { + int ret; + + // The existing markers in the container are marked to not have valid p2d + // (has_p2d=false) + typename std::map::iterator iter = container.begin(); + typename std::map::iterator iter_end = container.end(); + for (; iter != iter_end; iter++) + { + T& f = iter->second; + if (f.type_id != type_id) + continue; + f.has_p2d = false; // This is updated again to true if tracking succeeds + } + + // Detect without making the unnecessary pose update + ret = MarkerDetector::Detect(image, cam, track, visualize, + max_new_marker_error, max_track_error, + labeling_method, false); + + // Fill in the detected markers to 
the container + for (size_t i = 0; i < MarkerDetector::markers->size(); i++) + { + M& m = (*(MarkerDetector::markers))[i]; + for (int corner = 0; corner < 4; corner++) + { + int id = MarkerIdToContainerId(m.GetId(), corner, first_id, last_id); + if (id != -1) + { + T& f = container[id]; + f.type_id = type_id; + f.has_p2d = true; + f.p2d.x = float(m.marker_corners_img[corner].x); + f.p2d.y = float(m.marker_corners_img[corner].y); + } + } + } + return ret; + } + + /** \brief Fill in 3D-coordinates for \e marker_id marker corners using \e + * edge_length and \e pose */ + template + void MarkerToEC(std::map& container, int marker_id, + double edge_length, Pose& pose, int type_id = 0, + int first_id = 0, int last_id = 65535) + { + CvMat* m3 = cvCreateMat(4, 4, CV_64F); + cvSetIdentity(m3); + pose.GetMatrix(m3); + + for (size_t corner = 0; corner < 4; ++corner) + { + // TODO: This should be exactly the same as in Marker class. + // Should we get the values from there somehow? + double X_data[4] = { 0, 0, 0, 1 }; + if (corner == 0) + { + X_data[0] = -0.5 * edge_length; + X_data[1] = -0.5 * edge_length; + } + else if (corner == 1) + { + X_data[0] = +0.5 * edge_length; + X_data[1] = -0.5 * edge_length; + } + else if (corner == 2) + { + X_data[0] = +0.5 * edge_length; + X_data[1] = +0.5 * edge_length; + } + else if (corner == 3) + { + X_data[0] = -0.5 * edge_length; + X_data[1] = +0.5 * edge_length; + } + + CvMat X = cvMat(4, 1, CV_64F, X_data); + cvMatMul(m3, &X, &X); + + int id = MarkerIdToContainerId(marker_id, corner, first_id, last_id); + T& f = container[id]; + f.type_id = type_id; + f.p3d.x = float(X_data[0] / X_data[3]); + f.p3d.y = float(X_data[1] / X_data[3]); + f.p3d.z = float(X_data[2] / X_data[3]); + f.has_p3d = true; + } + cvReleaseMat(&m3); + } }; /** \brief Version of \e MultiMarker using external container */ -class MultiMarkerEC : public MultiMarker { +class MultiMarkerEC : public MultiMarker +{ public: - /** \brief Fill in 3D-coordinates for marker corners to the given container */ - template - bool MarkersToEC(std::map &container, int type_id=0, int first_id=0,int last_id=65535) { - bool ret = false; - for (size_t i = 0; i < marker_indices.size(); i++) { - if (marker_status[i] == 0) continue; // Skip the ones not in point cloud - int marker_id = marker_indices[i]; - for (int corner = 0; corner < 4; corner++) { - int id = MarkerIdToContainerId(marker_id, corner, first_id, last_id); - if (id != -1) { - int pc_index = pointcloud_index(marker_id, corner); - T &f = container[id]; - f.type_id = type_id; - f.p3d.x = float(pointcloud[pc_index].x); - f.p3d.y = float(pointcloud[pc_index].y); - f.p3d.z = float(pointcloud[pc_index].z); - f.has_p3d = true; - ret = true; - } - } - } - return ret; - } - - template - bool MarkersFromEC(std::map &container, int type_id=0, int first_id=0,int last_id=65535) { - // TODO... 
- return false; - } - - template - bool Save(std::map &container, const char* fname, FILE_FORMAT format = FILE_FORMAT_DEFAULT, int type_id=0, int first_id=0,int last_id=65535) { - if (!MarkersFromEC(container, type_id, first_id, last_id)) return false; - if (!MultiMarker::Save(fname, format)) return false; - return true; - } - - /** \brief Fill in 3D-coordinates for marker corners to the given container using save \e MultiMarker file */ - template - bool Load(std::map &container, const char* fname, FILE_FORMAT format = FILE_FORMAT_DEFAULT, int type_id=0, int first_id=0,int last_id=65535) { - if (!MultiMarker::Load(fname, format)) return false; - return MarkersToEC(container, type_id, first_id, last_id); - } + /** \brief Fill in 3D-coordinates for marker corners to the given container */ + template + bool MarkersToEC(std::map& container, int type_id = 0, + int first_id = 0, int last_id = 65535) + { + bool ret = false; + for (size_t i = 0; i < marker_indices.size(); i++) + { + if (marker_status[i] == 0) + continue; // Skip the ones not in point cloud + int marker_id = marker_indices[i]; + for (int corner = 0; corner < 4; corner++) + { + int id = MarkerIdToContainerId(marker_id, corner, first_id, last_id); + if (id != -1) + { + int pc_index = pointcloud_index(marker_id, corner); + T& f = container[id]; + f.type_id = type_id; + f.p3d.x = float(pointcloud[pc_index].x); + f.p3d.y = float(pointcloud[pc_index].y); + f.p3d.z = float(pointcloud[pc_index].z); + f.has_p3d = true; + ret = true; + } + } + } + return ret; + } + + template + bool MarkersFromEC(std::map& container, int type_id = 0, + int first_id = 0, int last_id = 65535) + { + // TODO... + return false; + } + + template + bool Save(std::map& container, const char* fname, + FILE_FORMAT format = FILE_FORMAT_DEFAULT, int type_id = 0, + int first_id = 0, int last_id = 65535) + { + if (!MarkersFromEC(container, type_id, first_id, last_id)) + return false; + if (!MultiMarker::Save(fname, format)) + return false; + return true; + } + + /** \brief Fill in 3D-coordinates for marker corners to the given container + * using save \e MultiMarker file */ + template + bool Load(std::map& container, const char* fname, + FILE_FORMAT format = FILE_FORMAT_DEFAULT, int type_id = 0, + int first_id = 0, int last_id = 65535) + { + if (!MultiMarker::Load(fname, format)) + return false; + return MarkersToEC(container, type_id, first_id, last_id); + } }; -} +} // namespace alvar #endif - diff --git a/ar_track_alvar/include/ar_track_alvar/FernImageDetector.h b/ar_track_alvar/include/ar_track_alvar/FernImageDetector.h index 798f839..954bfa8 100644 --- a/ar_track_alvar/include/ar_track_alvar/FernImageDetector.h +++ b/ar_track_alvar/include/ar_track_alvar/FernImageDetector.h @@ -44,31 +44,29 @@ using namespace cv; -namespace alvar{ - +namespace alvar +{ /** * \brief FernClassifier subclass that implements binary reading and writting. 
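 *
 * A minimal sketch of writing the classifier in the binary format (the file
 * name and stream setup here are illustrative, not part of the API):
 * \code
 * std::fstream stream("fern_classifier.bin",
 *                     std::ios::out | std::ios::binary);
 * classifier.writeBinary(stream);
 * \endcode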
*/ class FernClassifierWrapper : public FernClassifier { public: - FernClassifierWrapper(); - FernClassifierWrapper(const FileNode &fileNode); - FernClassifierWrapper(const vector > &points, - const vector &referenceImages, - const vector > &labels = vector >(), - int _nclasses = 0, - int _patchSize = PATCH_SIZE, - int _signatureSize = DEFAULT_SIGNATURE_SIZE, - int _nstructs = DEFAULT_STRUCTS, - int _structSize = DEFAULT_STRUCT_SIZE, - int _nviews = DEFAULT_VIEWS, - int _compressionMethod = COMPRESSION_NONE, - const PatchGenerator &patchGenerator = PatchGenerator()); - virtual ~FernClassifierWrapper(); - - virtual void readBinary(std::fstream &stream); - virtual void writeBinary(std::fstream &stream) const; + FernClassifierWrapper(); + FernClassifierWrapper(const FileNode& fileNode); + FernClassifierWrapper( + const vector >& points, + const vector& referenceImages, + const vector >& labels = vector >(), + int _nclasses = 0, int _patchSize = PATCH_SIZE, + int _signatureSize = DEFAULT_SIGNATURE_SIZE, + int _nstructs = DEFAULT_STRUCTS, int _structSize = DEFAULT_STRUCT_SIZE, + int _nviews = DEFAULT_VIEWS, int _compressionMethod = COMPRESSION_NONE, + const PatchGenerator& patchGenerator = PatchGenerator()); + virtual ~FernClassifierWrapper(); + + virtual void readBinary(std::fstream& stream); + virtual void writeBinary(std::fstream& stream) const; }; /** @@ -76,41 +74,41 @@ class FernClassifierWrapper : public FernClassifier */ class ALVAR_EXPORT FernImageDetector { -public: - FernImageDetector(const bool visualize = false); - ~FernImageDetector(); - - void imagePoints(vector &points); - void modelPoints(vector &points, bool normalize = true); - - cv::Size size(); - cv::Mat homography(); - double inlierRatio(); - - void train(const std::string &filename); - void train(Mat &image); - void findFeatures(Mat &image, bool planeAssumption = true); - - bool read(const std::string &filename, const bool binary = true); - bool write(const std::string &filename, const bool binary = true); +public: + FernImageDetector(const bool visualize = false); + ~FernImageDetector(); + + void imagePoints(vector& points); + void modelPoints(vector& points, bool normalize = true); + + cv::Size size(); + cv::Mat homography(); + double inlierRatio(); + + void train(const std::string& filename); + void train(Mat& image); + void findFeatures(Mat& image, bool planeAssumption = true); + + bool read(const std::string& filename, const bool binary = true); + bool write(const std::string& filename, const bool binary = true); private: - PatchGenerator mPatchGenerator; - LDetector mLDetector; - std::vector mClassifier; - - vector mKeyPoints; - vector mImagePoints; - vector mModelPoints; - - bool mVisualize; - std::vector mObjects; - cv::Size mSize; - cv::Mat mCorrespondences; - cv::Mat mHomography; - double mInlierRatio; + PatchGenerator mPatchGenerator; + LDetector mLDetector; + std::vector mClassifier; + + vector mKeyPoints; + vector mImagePoints; + vector mModelPoints; + + bool mVisualize; + std::vector mObjects; + cv::Size mSize; + cv::Mat mCorrespondences; + cv::Mat mHomography; + double mInlierRatio; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/FernPoseEstimator.h b/ar_track_alvar/include/ar_track_alvar/FernPoseEstimator.h index 6c7ae37..bc40f01 100644 --- a/ar_track_alvar/include/ar_track_alvar/FernPoseEstimator.h +++ b/ar_track_alvar/include/ar_track_alvar/FernPoseEstimator.h @@ -42,36 +42,35 @@ namespace alvar { - /** * \brief Pose estimation class for 
FernImageDetector. */ class ALVAR_EXPORT FernPoseEstimator { - public: - FernPoseEstimator(); - ~FernPoseEstimator(); + FernPoseEstimator(); + ~FernPoseEstimator(); + + Pose pose() const; + Camera camera() const; - Pose pose() const; - Camera camera() const; + bool setCalibration(const std::string& filename, int width, int height); + void setResolution(int width, int height); - bool setCalibration(const std::string &filename, int width, int height); - void setResolution(int width, int height); - - typedef std::vector ImagePointVector; - typedef std::vector ModelPointVector; - typedef std::map ExternalContainerMap; - void calculateFromPointCorrespondences(ModelPointVector &mpts, ImagePointVector &ipts); - void updateFromTrackedPoints(ExternalContainerMap &container); - void extractPlaneCoordinates(ExternalContainerMap &container); + typedef std::vector ImagePointVector; + typedef std::vector ModelPointVector; + typedef std::map ExternalContainerMap; + void calculateFromPointCorrespondences(ModelPointVector& mpts, + ImagePointVector& ipts); + void updateFromTrackedPoints(ExternalContainerMap& container); + void extractPlaneCoordinates(ExternalContainerMap& container); private: - Pose mPose; - Camera mCamera; - CameraEC mCameraEC; + Pose mPose; + Camera mCamera; + CameraEC mCameraEC; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/FileFormat.h b/ar_track_alvar/include/ar_track_alvar/FileFormat.h index eacfb5f..9ba0165 100644 --- a/ar_track_alvar/include/ar_track_alvar/FileFormat.h +++ b/ar_track_alvar/include/ar_track_alvar/FileFormat.h @@ -30,42 +30,43 @@ * \brief This file defines various file formats. */ -namespace alvar { - - /** - * File format enumeration used when reading / writing configuration - * files. - */ - typedef enum { - /** - * \brief Default file format. - * - * Format is either OPENCV, TEXT or XML depending on load/store function used. - */ - FILE_FORMAT_DEFAULT, +namespace alvar +{ +/** + * File format enumeration used when reading / writing configuration + * files. + */ +typedef enum +{ + /** + * \brief Default file format. + * + * Format is either OPENCV, TEXT or XML depending on load/store function used. + */ + FILE_FORMAT_DEFAULT, - /** - * \brief File format written with cvWrite. - * - * File contents depend on the specific load/store function used. - */ - FILE_FORMAT_OPENCV, + /** + * \brief File format written with cvWrite. + * + * File contents depend on the specific load/store function used. + */ + FILE_FORMAT_OPENCV, - /** - * \brief Plain ASCII text file format. - * - * File contents depend on the specific load/store function used. - */ - FILE_FORMAT_TEXT, + /** + * \brief Plain ASCII text file format. + * + * File contents depend on the specific load/store function used. + */ + FILE_FORMAT_TEXT, - /** - * \brief XML file format. - * - * XML schema depends on the specific load/store function used. - */ - FILE_FORMAT_XML + /** + * \brief XML file format. + * + * XML schema depends on the specific load/store function used. 
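+ *
+ * For example (illustrative; assumes the \e Camera calibration loader, which
+ * accepts a \e FILE_FORMAT argument):
+ * \code
+ * cam.SetCalib("calib.xml", 640, 480, alvar::FILE_FORMAT_XML);
+ * \endcode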
+ */ + FILE_FORMAT_XML - } FILE_FORMAT; -} +} FILE_FORMAT; +} // namespace alvar -#endif //FILEFORMAT_H +#endif // FILEFORMAT_H diff --git a/ar_track_alvar/include/ar_track_alvar/FileFormatUtils.h b/ar_track_alvar/include/ar_track_alvar/FileFormatUtils.h index 0fd66f4..6d1f402 100644 --- a/ar_track_alvar/include/ar_track_alvar/FileFormatUtils.h +++ b/ar_track_alvar/include/ar_track_alvar/FileFormatUtils.h @@ -32,52 +32,54 @@ */ #include "Alvar.h" -#include "cv.h" +#include #include "tinyxml.h" -namespace alvar { - - /** \brief Utility functions for file reading / writing. - */ - class ALVAR_EXPORT FileFormatUtils { - private: - /** - * \brief Reads matrix type, rows and cols from XML element. - * \return true if XML element appears to be valid; otherwise false. - */ - static bool decodeXMLMatrix(const TiXmlElement *xml_matrix, int &type, int &rows, int &cols); - - public: +namespace alvar +{ +/** \brief Utility functions for file reading / writing. + */ +class ALVAR_EXPORT FileFormatUtils +{ +private: + /** + * \brief Reads matrix type, rows and cols from XML element. + * \return true if XML element appears to be valid; otherwise false. + */ + static bool decodeXMLMatrix(const TiXmlElement* xml_matrix, int& type, + int& rows, int& cols); - /** \brief Allocates CvMat of a correct type and size. - * \param xml_matrix alvar:matrix element. - * \return CvMat that has the correct size for \e parseXMLMatrix. - */ - static CvMat* allocateXMLMatrix(const TiXmlElement *xml_matrix); +public: + /** \brief Allocates cv::Mat of a correct type and size. + * \param xml_matrix alvar:matrix element. + * \return cv::Mat that has the correct size for \e parseXMLMatrix. + */ + static cv::Mat* allocateXMLMatrix(const TiXmlElement* xml_matrix); - /** \brief Reads contents of alvar:matrix into CvMat. - * - * Parsing fails if the matrix is not the same type or does not have - * the same number of rows and columns as the XML element. - * - * \param xml_matrix alvar:matrix element. If NULL no parsing is done and - * false is returned. - * \param matrix CvMat that has the correct size, populated with data in - * the xml_matrix. - * \return true if matrix was successfully parsed; otherwise false. - */ - static bool parseXMLMatrix(const TiXmlElement *xml_matrix, CvMat *matrix); + /** \brief Reads contents of alvar:matrix into cv::Mat. + * + * Parsing fails if the matrix is not the same type or does not have + * the same number of rows and columns as the XML element. + * + * \param xml_matrix alvar:matrix element. If NULL no parsing is done and + * false is returned. + * \param matrix cv::Mat that has the correct size, populated with data in + * the xml_matrix. + * \return true if matrix was successfully parsed; otherwise false. + */ + static bool parseXMLMatrix(const TiXmlElement* xml_matrix, cv::Mat& matrix); - /** \brief Allocates new XML element and populates it with a CvMat data. - * - * The returned element needs to be deallocated by the caller. - * - * \param element_name Name of the allocated tiXmlElement. - * \param matrix Data that is written into the returned XML element. - * \return Newly allocated TiXmlElement. - */ - static TiXmlElement* createXMLMatrix(const char* element_name, const CvMat *matrix); - }; -} + /** \brief Allocates new XML element and populates it with a cv::Mat data. + * + * The returned element needs to be deallocated by the caller. + * + * \param element_name Name of the allocated tiXmlElement. + * \param matrix Data that is written into the returned XML element. 
+ * \return Newly allocated TiXmlElement. + */ + static TiXmlElement* createXMLMatrix(const char* element_name, + const cv::Mat& matrix); +}; +} // namespace alvar -#endif //FILEFORMATUTILS_H +#endif // FILEFORMATUTILS_H diff --git a/ar_track_alvar/include/ar_track_alvar/Filter.h b/ar_track_alvar/include/ar_track_alvar/Filter.h index 52943c6..d63f565 100644 --- a/ar_track_alvar/include/ar_track_alvar/Filter.h +++ b/ar_track_alvar/include/ar_track_alvar/Filter.h @@ -36,10 +36,11 @@ #include #include -namespace alvar { - +namespace alvar +{ /** - * \brief \e Filter is pure virtual class describing the basic virtual interface for all filters + * \brief \e Filter is pure virtual class describing the basic virtual interface + * for all filters * * Basic usage: * \code @@ -62,34 +63,39 @@ namespace alvar { * cout<<(d = 2.9)< buffer; - void push_to_buffer(double y); + unsigned int count; + unsigned int window_size; + std::deque buffer; + void push_to_buffer(double y); + public: - FilterAverage(int size=3) { setWindowSize(size); } - void setWindowSize(int size) { window_size=size; count=0; } - int getWindowSize() { return window_size; } - int getCurrentSize() { return (int) buffer.size(); } - double operator= (double _value) { return next(_value); } - virtual double next(double y); - virtual void reset(); - double deviation() const; + FilterAverage(int size = 3) + { + setWindowSize(size); + } + void setWindowSize(int size) + { + window_size = size; + count = 0; + } + int getWindowSize() + { + return window_size; + } + int getCurrentSize() + { + return (int)buffer.size(); + } + double operator=(double _value) + { + return next(_value); + } + virtual double next(double y); + virtual void reset(); + double deviation() const; }; /** @@ -125,21 +149,30 @@ class ALVAR_EXPORT FilterAverage : public Filter { * * The \e FilterMedian remembers \e window_size last elements * in the time series and returns always the middle element - * after sorting ((\e window_size / 2) + 1) elements. The size of the window - * \e window_size can be set in the constructor or with + * after sorting ((\e window_size / 2) + 1) elements. The size of the window + * \e window_size can be set in the constructor or with * \e setWindowSize() . * */ -class ALVAR_EXPORT FilterMedian : public FilterAverage { - std::vector sort_buffer; +class ALVAR_EXPORT FilterMedian : public FilterAverage +{ + std::vector sort_buffer; + public: - FilterMedian(int size=3) { setWindowSize(size); } - void setWindowSize(int size) { - FilterAverage::setWindowSize(size); - sort_buffer.resize(size); - } - double operator= (double _value) { return next(_value); } - virtual double next(double y); + FilterMedian(int size = 3) + { + setWindowSize(size); + } + void setWindowSize(int size) + { + FilterAverage::setWindowSize(size); + sort_buffer.resize(size); + } + double operator=(double _value) + { + return next(_value); + } + virtual double next(double y); }; /** @@ -147,55 +180,85 @@ class ALVAR_EXPORT FilterMedian : public FilterAverage { * \note This could be named also as FilterSingleExponentialSmoothing * * The \e FilterRunningAverage calculates a simple running average - * using the weight value \e alpha. + * using the weight value \e alpha. * \code * value = ((1.0-alpha) * value) + (alpha * (double)y); * \endcode - * If alpha is larger (near 1.0) the average reacts faster for - * changes and if it is near 0.0 then it reacts slowly. 
The
- * weight value \e alpha may be set in the constructor or
+ * If alpha is larger (near 1.0) the average reacts faster for
+ * changes and if it is near 0.0 then it reacts slowly. The
+ * weight value \e alpha may be set in the constructor or
 * with \e setAlpha() .
 */
-class ALVAR_EXPORT FilterRunningAverage : public Filter {
+class ALVAR_EXPORT FilterRunningAverage : public Filter
+{
 protected:
-	double alpha;
-	bool breset;
+  double alpha;
+  bool breset;
+
 public:
-	FilterRunningAverage(double _alpha=0.5) { breset=true; setAlpha(_alpha); }
-	void setAlpha(double _alpha) { alpha=std::max(std::min(_alpha,1.0),0.0); }
-	double getAlpha() { return alpha; }
-	double operator= (double _value) { return next(_value); }
-	virtual double next(double y);
-	virtual void reset();
+  FilterRunningAverage(double _alpha = 0.5)
+  {
+    breset = true;
+    setAlpha(_alpha);
+  }
+  void setAlpha(double _alpha)
+  {
+    alpha = std::max(std::min(_alpha, 1.0), 0.0);
+  }
+  double getAlpha()
+  {
+    return alpha;
+  }
+  double operator=(double _value)
+  {
+    return next(_value);
+  }
+  virtual double next(double y);
+  virtual void reset();
 };

 /**
- * \brief \e FilterDoubleExponentialSmoothing provides an weighted running average filter
+ * \brief \e FilterDoubleExponentialSmoothing provides a weighted running
+ * average filter
 *
 * The \e FilterDoubleExponentialSmoothing calculates a simple running average
 * for both the \e average and \e slope using the weight values \e alpha and
- * \e gamma. 
+ * \e gamma.
 * \code
 * value = ((1.0-alpha) * (value + slope)) + (alpha * (double)y);
 * slope = ((1.0-gamma) * (slope)) + (gamma * (value - value_prev));
 * \endcode
- * If the weight values (\e alpha , \e gamma) are larger (near 1.0) 
- * the formulas react faster for changes and if they are near 0.0 
+ * If the weight values (\e alpha , \e gamma) are larger (near 1.0)
+ * the formulas react faster for changes and if they are near 0.0
 * then the reaction is slower. The weight values \e alpha and \e gamma
 * may be set in the constructor or with \e setAlpha() and \e setGamma() .
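Driven against the declarations above, the two recurrences behave as follows; a minimal usage sketch, assuming the `ar_track_alvar/Filter.h` include path:

```
#include "ar_track_alvar/Filter.h"
#include <cstdio>

int main()
{
  alvar::FilterRunningAverage avg(0.2);  // small alpha: reacts slowly
  alvar::FilterDoubleExponentialSmoothing des(0.5, 0.5);

  const double samples[] = { 0.0, 0.0, 1.0, 1.0, 1.0 };
  for (double y : samples)
  {
    // value = ((1.0-alpha) * value) + (alpha * y)
    double a = avg.next(y);
    // value = ((1.0-alpha) * (value + slope)) + (alpha * y)
    // slope = ((1.0-gamma) * slope) + (gamma * (value - value_prev))
    double d = des.next(y);  // tracks changes faster thanks to the slope term
    std::printf("%f %f\n", a, d);
  }
}
```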
*/ -class ALVAR_EXPORT FilterDoubleExponentialSmoothing : public FilterRunningAverage { +class ALVAR_EXPORT FilterDoubleExponentialSmoothing + : public FilterRunningAverage +{ protected: - double gamma; - double slope; + double gamma; + double slope; + public: - FilterDoubleExponentialSmoothing(double _alpha=0.5, double _gamma=1.0) : FilterRunningAverage(_alpha) { - setGamma(_gamma); - } - void setGamma(double _gamma) { gamma=std::max(std::min(_gamma,1.0),0.0); } - double getGamma() { return gamma; } - double operator= (double _value) { return next(_value); } - virtual double next(double y); + FilterDoubleExponentialSmoothing(double _alpha = 0.5, double _gamma = 1.0) + : FilterRunningAverage(_alpha) + { + setGamma(_gamma); + } + void setGamma(double _gamma) + { + gamma = std::max(std::min(_gamma, 1.0), 0.0); + } + double getGamma() + { + return gamma; + } + double operator=(double _value) + { + return next(_value); + } + virtual double next(double y); }; /** @@ -203,37 +266,47 @@ class ALVAR_EXPORT FilterDoubleExponentialSmoothing : public FilterRunningAverag * */ template -class ALVAR_EXPORT FilterArray { +class ALVAR_EXPORT FilterArray +{ protected: - double *tmp; - std::vector arr; + double* tmp; + std::vector arr; + public: - FilterArray(int size) { - tmp = NULL; - SetSize(size); - } - ~FilterArray() { - delete [] tmp; - } - size_t GetSize() { - return arr.size(); - } - void SetSize(size_t size) { - if (tmp) delete [] tmp; - tmp = new double[size]; - arr.resize(size); - } - F &operator[](size_t i) { - return arr[i]; - } - const double *as_double_array(size_t start_i=0) { - for (size_t i=0; i #ifdef WIN32 - #include - #include - #include - #include +#include +#include +#include +#include #else - #include - #include - #include +#include +#include +#include #endif class Drawable { public: - Drawable(double _scale=1, double _r=1, double _g=1, double _b=1); + Drawable(double _scale = 1, double _r = 1, double _g = 1, double _b = 1); - void SetScale(double _scale); - void SetColor(double _r=1, double _g=1, double _b=1); - - virtual void Draw(); - virtual void DrawAxis(double scale, double color[3]); - - void SetGLMatTraQuat(double *tra, double *quat, bool flip=false); - void SetGLMatTraRod(double *tra, double *rod); + void SetScale(double _scale); + void SetColor(double _r = 1, double _g = 1, double _b = 1); - double scale; - double color[3]; - double gl_mat[16]; + virtual void Draw(); + virtual void DrawAxis(double scale, double color[3]); + + void SetGLMatTraQuat(double* tra, double* quat, bool flip = false); + void SetGLMatTraRod(double* tra, double* rod); + + double scale; + double color[3]; + double gl_mat[16]; protected: - double ax_len; + double ax_len; }; namespace GlutViewer { - void Draw(); - void Exit(); - void Start(int argc, char** argv, int w, int h, float r=300.0); - void Reshape(int w, int h); +void Draw(); +void Exit(); +void Start(int argc, char** argv, int w, int h, float r = 300.0); +void Reshape(int w, int h); - void DrawableClear(); - void DrawableAdd(Drawable* item); +void DrawableClear(); +void DrawableAdd(Drawable* item); - void DrawVr(); - void DrawAr(); - void DrawFloor(); - void DrawContent(); - void DrawAxis(float scale); - void Mouse(int button, int state, int x, int y); - void Motion(int x, int y); - void SetGlProjectionMatrix(double p[16]); - void SetGlModelviewMatrix(double p[16]); - void KeyCallback(int key, int x, int y); +void DrawVr(); +void DrawAr(); +void DrawFloor(); +void DrawContent(); +void DrawAxis(float scale); +void Mouse(int button, int state, 
int x, int y); +void Motion(int x, int y); +void SetGlProjectionMatrix(double p[16]); +void SetGlModelviewMatrix(double p[16]); +void KeyCallback(int key, int x, int y); - void SetVideo(const IplImage* _image); - void DrawVideo(); +void SetVideo(const IplImage* _image); +void DrawVideo(); - double GetXOffset(); - double GetYOffset(); -} +double GetXOffset(); +double GetYOffset(); +} // namespace GlutViewer #endif diff --git a/ar_track_alvar/include/ar_track_alvar/IntegralImage.h b/ar_track_alvar/include/ar_track_alvar/IntegralImage.h index 4d32262..e73c065 100644 --- a/ar_track_alvar/include/ar_track_alvar/IntegralImage.h +++ b/ar_track_alvar/include/ar_track_alvar/IntegralImage.h @@ -35,13 +35,14 @@ #include #include -namespace alvar { - -/** \brief Class for calculating "evenly spaced" integer indices for data sequence +namespace alvar +{ +/** \brief Class for calculating "evenly spaced" integer indices for data + * sequence * - * If we have a data sequence we want to step through in certain amount of steps, - * \e IntIndex can be used for iterating through the steps using fast integer implementation. - * This class is related to stride iterators. + * If we have a data sequence we want to step through in certain amount of + * steps, \e IntIndex can be used for iterating through the steps using fast + * integer implementation. This class is related to stride iterators. * * \section Usage * \code @@ -52,116 +53,130 @@ namespace alvar { * } * \endcode */ -class ALVAR_EXPORT IntIndex { +class ALVAR_EXPORT IntIndex +{ protected: - int index; - int step; - int step_remainder; - int estep; - int next_step; - int res; - int steps; - void update_next_step(); + int index; + int step; + int step_remainder; + int estep; + int next_step; + int res; + int steps; + void update_next_step(); + public: - /** \brief Create \e IntIndex for indexing \e _res elements in predefined amount of \e _steps . - * \param _res The number of elements in the data sequence we want to index. - * \param _steps The number of steps to use to cover the \e _res elements (\e _steps < \e _res) - */ - IntIndex(int _res, int _steps); - /** \brief Set the integer index to the "grid" value nearest to \e v . - */ - int operator=(int v); - /** \brief Take the next integer index step. */ - int next(); - /** \brief Get the index value */ - int get() const; - /** \brief How much the index will be increased with the next \e next() */ - int get_next_step() const; - /** \brief For testing have we reached the end. */ - int end() const; + /** \brief Create \e IntIndex for indexing \e _res elements in predefined + * amount of \e _steps . \param _res The number of elements in the data + * sequence we want to index. \param _steps The number of steps to use to + * cover the \e _res elements (\e _steps < \e _res) + */ + IntIndex(int _res, int _steps); + /** \brief Set the integer index to the "grid" value nearest to \e v . + */ + int operator=(int v); + /** \brief Take the next integer index step. */ + int next(); + /** \brief Get the index value */ + int get() const; + /** \brief How much the index will be increased with the next \e next() */ + int get_next_step() const; + /** \brief For testing have we reached the end. */ + int end() const; }; - -/** \brief \e IntegralImage is used for calculating rectangular image sums and averages rapidly +/** \brief \e IntegralImage is used for calculating rectangular image sums and + * averages rapidly * - * The integral images are based on making intermediate representation of the image. 
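The accessors above suggest a loop of the following shape; a sketch only, and the termination convention of end() is an assumption, not verified behavior:

```
#include "ar_track_alvar/IntegralImage.h"

// Visit roughly 10 evenly spaced indices out of a 640-element sequence
// using integer arithmetic only.
void stride_demo()
{
  alvar::IntIndex index(640, 10);
  while (!index.end())  // assumed: non-zero once the sequence is exhausted
  {
    int i = index.get();  // current "grid" index
    // ... process element i ...
    index.next();
  }
}
```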
Using this
- * approach the sum/average of rectangular area can be calculated using only four references for
- * the integral image. The integral images are commonly used with HAAR-like features.
+ * image. Using this approach the sum/average of rectangular area can be
+ * calculated using only four references for the integral image. The integral
+ * images are commonly used with HAAR-like features.
 *
- * The \e IntegralImage should be used when we need to a lot of sum/average calculations for
- * the same image.
- *
- * \section References
- * - Konstantinos G. Derpanis (2007). Integral image-based representations. Department of Computer
- * Science and Engineering, York University.
- * - Viola, P. & Jones, M. (2001). Rapid object detection using a boosted cascade of simple
- * features. In IEEE Computer Vision and Pattern Recognition (pp. I:511-518).
+ * The \e IntegralImage should be used when we need to do a lot of sum/average
+ * calculations for the same image.
+ *
+ * \section References
+ * - Konstantinos G. Derpanis (2007). Integral image-based representations.
+ * Department of Computer Science and Engineering, York University.
+ * - Viola, P. & Jones, M. (2001). Rapid object detection using a boosted
+ * cascade of simple features. In IEEE Computer Vision and Pattern Recognition
+ * (pp. I:511-518).
 */
-class ALVAR_EXPORT IntegralImage {
-	IplImage *sum;
+class ALVAR_EXPORT IntegralImage
+{
+  IplImage* sum;
+
 public:
-	IntegralImage();
-	~IntegralImage();
-	/** \brief Update integral image for the given image.
-	 * \param gray The original grayscale image we want analyze
-	 */
-	void Update(IplImage *gray);
-	/** \brief Calculate the sum for the given rectangular area in the image.
-	 * \param rect The rectancle
-	 * \param count If this parameter is not 0 it is filled with number of pixels in the rectangle.
-	 */
-	double GetSum(CvRect &rect, int *count=0);
-	/** \brief Calculate the average for the given rectangular area in the image. */
-	double GetAve(CvRect &rect);
-	/** \brief Get a sub-image using integral image representation.
-	 * \param rect The rectangle we want to get the sub-image from
-	 * \param sub The image where the sub-image is generated. Note, the desired resolution is defined by \e sub.
-	 *
-	 * Get an image \e sub with a predefined resolution from the given
-	 * rectangular area \e rect . In practice the \e sub is filled by
-	 * getting the average with \e GetAve() for every pixel area.
-	 */
-	void GetSubimage(const CvRect &rect, IplImage *sub);
+  IntegralImage();
+  ~IntegralImage();
+  /** \brief Update integral image for the given image.
+   * \param gray The original grayscale image we want to analyze
+   */
+  void Update(IplImage* gray);
+  /** \brief Calculate the sum for the given rectangular area in the image.
+   * \param rect The rectangle
+   * \param count If this parameter is not 0 it is filled with number of pixels
+   * in the rectangle.
+   */
+  double GetSum(CvRect& rect, int* count = 0);
+  /** \brief Calculate the average for the given rectangular area in the image.
+   */
+  double GetAve(CvRect& rect);
+  /** \brief Get a sub-image using integral image representation.
+   * \param rect The rectangle we want to get the sub-image from
+   * \param sub The image where the sub-image is generated. Note, the desired
+   * resolution is defined by \e sub.
+   *
+   * Get an image \e sub with a predefined resolution from the given
+   * rectangular area \e rect .
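The four-reference lookup this class builds on can be written directly against OpenCV's own integral transform as a cross-check; plain OpenCV below, not this class's API:

```
#include <opencv2/imgproc.hpp>

// Sum over the half-open rectangle [x1,x2) x [y1,y2) of the source
// image, given its integral image I of size (h+1) x (w+1) as produced
// by cv::integral(src, I, CV_64F): exactly four lookups.
double rect_sum(const cv::Mat& I, int x1, int y1, int x2, int y2)
{
  return I.at<double>(y2, x2) - I.at<double>(y1, x2)
       - I.at<double>(y2, x1) + I.at<double>(y1, x1);
}
```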
In practice the \e sub is filled by + * getting the average with \e GetAve() for every pixel area. + */ + void GetSubimage(const CvRect& rect, IplImage* sub); }; -/** \brief \e IntegralGradient is used for calculating rectangular image gradients rapidly +/** \brief \e IntegralGradient is used for calculating rectangular image + * gradients rapidly * - * We calculate \e IntegralImage:s based on point normals for 4-pixel intersections - * (see Donahue1992). Using the integral images it is possible to make fast gradient - * calculations to any image rectangle. This approach is useful when we need to calculate many - * gradient rectangles for the same image. + * We calculate \e IntegralImage:s based on point normals for 4-pixel + * intersections (see Donahue1992). Using the integral images it is possible to + * make fast gradient calculations to any image rectangle. This approach is + * useful when we need to calculate many gradient rectangles for the same image. * * (See \e SampleIntegralImage.cpp) */ -class ALVAR_EXPORT IntegralGradient { +class ALVAR_EXPORT IntegralGradient +{ protected: - IplImage *normalx; - IplImage *normaly; - IntegralImage integx; - IntegralImage integy; - // Calculate point normals for 4-pixel intersection - // as described in Donahue1992 - void CalculatePointNormals(IplImage *gray); + IplImage* normalx; + IplImage* normaly; + IntegralImage integx; + IntegralImage integy; + // Calculate point normals for 4-pixel intersection + // as described in Donahue1992 + void CalculatePointNormals(IplImage* gray); + public: - IntegralGradient(); - ~IntegralGradient(); - /** \brief Update intermediate images for calculating the gradients to the given image. - * \param gray The original grayscale image we want analyze - */ - void Update(IplImage *gray); - /** \brief Calculate the gradient for the given rectangular area in the image. - * \param dirx Method fills in the x-component of the gradient here - * \param diry Method fills in the y-component of the gradient here - * \param count If this parameter is not 0 it is filled with number of pixels in the rectangle. - */ - void GetGradient(CvRect &rect, double *dirx, double *diry, int *count=0); - /** \brief Calculate the average gradient for the given rectangular area in the image. - * \param dirx Method fills in the x-component of the gradient here - * \param diry Method fills in the y-component of the gradient here - */ - void GetAveGradient(CvRect &rect, double *dirx, double *diry); + IntegralGradient(); + ~IntegralGradient(); + /** \brief Update intermediate images for calculating the gradients to the + * given image. \param gray The original grayscale image we want analyze + */ + void Update(IplImage* gray); + /** \brief Calculate the gradient for the given rectangular area in the image. + * \param dirx Method fills in the x-component of the gradient here + * \param diry Method fills in the y-component of the gradient here + * \param count If this parameter is not 0 it is filled with number of pixels + * in the rectangle. + */ + void GetGradient(CvRect& rect, double* dirx, double* diry, int* count = 0); + /** \brief Calculate the average gradient for the given rectangular area in + * the image. 
\param dirx Method fills in the x-component of the gradient here + * \param diry Method fills in the y-component of the gradient here + */ + void GetAveGradient(CvRect& rect, double* dirx, double* diry); }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Kalman.h b/ar_track_alvar/include/ar_track_alvar/Kalman.h index cfd3a09..c1597dc 100644 --- a/ar_track_alvar/include/ar_track_alvar/Kalman.h +++ b/ar_track_alvar/include/ar_track_alvar/Kalman.h @@ -26,287 +26,333 @@ /** * \file Kalman.h - * + * * \brief This file implements a versatile Kalman filter. */ #include "Alvar.h" -#include "cxcore.h" - -namespace alvar { +#include +namespace alvar +{ /** \brief Core implementation for Kalman sensor */ -class ALVAR_EXPORT KalmanSensorCore { - friend class KalmanVisualize; +class ALVAR_EXPORT KalmanSensorCore +{ + friend class KalmanVisualize; + protected: - int n; - int m; - CvMat *H_trans; - CvMat *z_pred; - CvMat *z_residual; - CvMat *x_gain; + int n; + int m; + cv::Mat H_trans; + cv::Mat z_pred; + cv::Mat z_residual; + cv::Mat x_gain; + public: - /** \brief Latest measurement vector (m*1) */ - CvMat *z; - /** \brief The matrix (m*n) mapping Kalman state vector into this sensor's measurements vector */ - CvMat *H; - /** \brief The matrix (n*m) containing Kalman gain (something between 0 and H^-1). - * In this core-implementation we assume this to be precalculated. In \e KalmanSensor this is updated using \e update_K . - */ - CvMat *K; - /** \brief Copy constructor */ - KalmanSensorCore(const KalmanSensorCore &k); - /** - * \brief Constructor - * \param _n The number of items in the Kalman state vector - * \param _m The number of measurements given by this sensor - */ - KalmanSensorCore(int _n, int _m); - /** \brief Destructor */ - ~KalmanSensorCore(); - /** \brief Accessor for n */ - int get_n() { return n; } - /** \brief Accessor for m */ - int get_m() { return m; } - /** \brief Method for updating the state estimate x - * This is called from \e predict_update() of \e Kalman. - * In \e KalmanSensorCore and in \e KalmanSensor this update is made linearly - * but \e KalmanSensorEkf will override this method to use unlinear estimation. - */ - virtual void update_x(CvMat *x_pred, CvMat *x); + /** \brief Latest measurement vector (m*1) */ + cv::Mat z; + /** \brief The matrix (m*n) mapping Kalman state vector into this sensor's + * measurements vector */ + cv::Mat H; + /** \brief The matrix (n*m) containing Kalman gain (something between 0 and + * H^-1). In this core-implementation we assume this to be precalculated. In + * \e KalmanSensor this is updated using \e update_K . + */ + cv::Mat K; + /** \brief Copy constructor */ + KalmanSensorCore(const KalmanSensorCore& k); + /** + * \brief Constructor + * \param _n The number of items in the Kalman state vector + * \param _m The number of measurements given by this sensor + */ + KalmanSensorCore(int _n, int _m); + /** \brief Destructor */ + ~KalmanSensorCore(); + /** \brief Accessor for n */ + int get_n() + { + return n; + } + /** \brief Accessor for m */ + int get_m() + { + return m; + } + /** \brief Method for updating the state estimate x + * This is called from \e predict_update() of \e Kalman. + * In \e KalmanSensorCore and in \e KalmanSensor this update is made linearly + * but \e KalmanSensorEkf will override this method to use unlinear + * estimation. 
+ */ + virtual void update_x(const cv::Mat& x_pred, cv::Mat& x); }; /** \brief Core implementation for Kalman */ -class ALVAR_EXPORT KalmanCore { - friend class KalmanVisualize; +class ALVAR_EXPORT KalmanCore +{ + friend class KalmanVisualize; + protected: - int n; - //CvMat *x_pred; - CvMat *F_trans; - virtual void predict_x(unsigned long tick); -public: - /** \brief The Kalman state vector (n*1) */ - CvMat *x; - /** \brief The matrix (n*n) containing the transition model for the internal state. */ - CvMat *F; - /** \brief Copy constructor */ - KalmanCore(const KalmanCore &s); - /** - * \brief Constructor - * \param _n The number of items in the Kalman state vector - */ - KalmanCore(int _n); - /** \brief Destructor */ - ~KalmanCore(); - /** \brief Accessor for n */ - int get_n() { return n; } - /** \brief Predict the Kalman state vector for the given time step . - * x_pred = F * x - */ - virtual CvMat *predict(); - /** \brief Predict the Kalman state vector and update the state using the constant Kalman gain. - * x = x_pred + K* ( z - H*x_pred) - */ - CvMat *predict_update(KalmanSensorCore *sensor); + int n; + // cv::Mat x_pred; + cv::Mat F_trans; + virtual void predict_x(unsigned long tick); - /** \brief Predicted state, TODO: should be protected?! */ - CvMat *x_pred; +public: + /** \brief The Kalman state vector (n*1) */ + cv::Mat x; + /** \brief The matrix (n*n) containing the transition model for the internal + * state. */ + cv::Mat F; + /** \brief Copy constructor */ + KalmanCore(const KalmanCore& s); + /** + * \brief Constructor + * \param _n The number of items in the Kalman state vector + */ + KalmanCore(int _n); + /** \brief Destructor */ + ~KalmanCore(); + /** \brief Accessor for n */ + int get_n() + { + return n; + } + /** \brief Predict the Kalman state vector for the given time step . + * x_pred = F * x + */ + virtual cv::Mat& predict(); + /** \brief Predict the Kalman state vector and update the state using the + * constant Kalman gain. x = x_pred + K* ( z - H*x_pred) + */ + cv::Mat& predict_update(KalmanSensorCore* sensor); + /** \brief Predicted state, TODO: should be protected?! */ + cv::Mat x_pred; }; /** \brief Kalman sensor implementation */ -class ALVAR_EXPORT KalmanSensor : public KalmanSensorCore { +class ALVAR_EXPORT KalmanSensor : public KalmanSensorCore +{ protected: - CvMat *R_tmp; - CvMat *P_tmp; + cv::Mat R_tmp; + cv::Mat P_tmp; + public: - /** \brief The covariance matrix for the observation noise */ - CvMat *R; - /** \brief Copy constructor */ - KalmanSensor(const KalmanSensor &k); - /** - * \brief Constructor - * \param _n The number of items in the Kalman state vector - * \param _m The number of measurements given by this sensor - */ - KalmanSensor(int n, int _m); - /** \brief Destructor */ - ~KalmanSensor(); - /** \brief Method for updating how the Kalman state vector is mapped into this sensor's measurements vector. - * This is called from \e predict_update() of \e Kalman. - * Please override this method if you want this mapping to change on the run (e.g. based on time?). - */ - virtual void update_H(CvMat *x_pred) {} - /** \brief Method for updating the Kalman gain. - * This is called from \e predict_update() of \e Kalman. - */ - virtual void update_K(CvMat *P_pred); - /** \brief Method for updating the error covariance matrix describing the accuracy of the state estimate. - * This is called from \e predict_update() of \e Kalman. 
- */ - virtual void update_P(CvMat *P_pred, CvMat *P); + /** \brief The covariance matrix for the observation noise */ + cv::Mat R; + /** \brief Copy constructor */ + KalmanSensor(const KalmanSensor& k); + /** + * \brief Constructor + * \param _n The number of items in the Kalman state vector + * \param _m The number of measurements given by this sensor + */ + KalmanSensor(int n, int _m); + /** \brief Destructor */ + ~KalmanSensor(); + /** \brief Method for updating how the Kalman state vector is mapped into + * this sensor's measurements vector. This is called from \e predict_update() + * of \e Kalman. Please override this method if you want this mapping to + * change on the run (e.g. based on time?). + */ + virtual void update_H(const cv::Mat& x_pred) + { + } + /** \brief Method for updating the Kalman gain. + * This is called from \e predict_update() of \e Kalman. + */ + virtual void update_K(const cv::Mat& P_pred); + /** \brief Method for updating the error covariance matrix describing the + * accuracy of the state estimate. This is called from \e predict_update() of + * \e Kalman. + */ + virtual void update_P(const cv::Mat& P_pred, cv::Mat& P); }; /** \brief Kalman implementation -* -* The Kalman filter provides an effective way of estimating a system/process recursively -* (http://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf). In this implementation -* we have separated the Kalman class (\e KalmanCore, \e Kalman or \e KalmanEkf) -* from the sensor class (\e KalmanSensorCore, \e KalmanSensor or \e KalmanSensorEkf). -* The selected Kalman class contains always the latest estimation of the process. -* The estimation can be updated using one or several sensors. This implementation allows -* SCAAT approach, where there may be several sensors (and several controls) for each -* Kalman filter (See http://www.cs.unc.edu/~welch/scaat.html). -* -* Currently we have have three levels of implementations for both Kalman and Sensor (\e core, \e "normal" and \e EKF). -* -* The \e core implementations can be used for really fast bare-bones core implementations when -* we have precalculated and constant \e K. In systems where \e F, \e H, \e Q and \e R are constants -* the \e K will converge into a constant and it can be precalculated. Note, that the core -* implementation need to assume constant frame rate if \e F depends on the timestep between frames. -* Note also, that the core-classes cannot use \e EKF Jacobians because they change the \e H. -* -* The \e "normal" implementations are used when we have a linear \e F for \e Kalman, or linear \e H -* for \e KalmanSensor. The \e EKF implementations are used when we have non-linear function \e f() -* for \e KalmanEkf, or non-linear function \e h() for \e KalmanSensorEkf. -* -* Furthermore we have a class \e KalmanVisualize for visualizing the internal state of \e Kalman. -* -* Note, that now the \e KalmanControl is left out from this implementation. But it could be added -* using similar conventions as the \e KalmanSensor. -*/ -class ALVAR_EXPORT Kalman : public KalmanCore { + * + * The Kalman filter provides an effective way of estimating a system/process + * recursively (http://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf). In + * this implementation we have separated the Kalman class (\e KalmanCore, \e + * Kalman or \e KalmanEkf) from the sensor class (\e KalmanSensorCore, \e + * KalmanSensor or \e KalmanSensorEkf). The selected Kalman class contains + * always the latest estimation of the process. 
The estimation can be updated + * using one or several sensors. This implementation allows SCAAT approach, + * where there may be several sensors (and several controls) for each Kalman + * filter (See http://www.cs.unc.edu/~welch/scaat.html). + * + * Currently we have have three levels of implementations for both Kalman and + * Sensor (\e core, \e "normal" and \e EKF). + * + * The \e core implementations can be used for really fast bare-bones core + * implementations when we have precalculated and constant \e K. In systems + * where \e F, \e H, \e Q and \e R are constants the \e K will converge into a + * constant and it can be precalculated. Note, that the core implementation need + * to assume constant frame rate if \e F depends on the timestep between frames. + * Note also, that the core-classes cannot use \e EKF Jacobians because they + * change the \e H. + * + * The \e "normal" implementations are used when we have a linear \e F for \e + * Kalman, or linear \e H for \e KalmanSensor. The \e EKF implementations are + * used when we have non-linear function \e f() for \e KalmanEkf, or non-linear + * function \e h() for \e KalmanSensorEkf. + * + * Furthermore we have a class \e KalmanVisualize for visualizing the internal + * state of \e Kalman. + * + * Note, that now the \e KalmanControl is left out from this implementation. But + * it could be added using similar conventions as the \e KalmanSensor. + */ +class ALVAR_EXPORT Kalman : public KalmanCore +{ protected: - int prev_tick; - void predict_P(); + int prev_tick; + void predict_P(); + public: - /** \brief The error covariance matrix describing the accuracy of the state estimate */ - CvMat *P; - /** \brief The covariance matrix for the process noise */ - CvMat *Q; - /** \brief The predicted error covariance matrix */ - CvMat *P_pred; - /** - * \brief Constructor - * \param n The number of items in the Kalman state vector - * \param _m The number of measurements given by this sensor - */ - Kalman(int _n); - /** \brief Destructor */ - ~Kalman(); - /** - * If your transition matrix F is based on time you need to override this method. - */ - virtual void update_F(unsigned long tick); - /** \brief Predict the Kalman state vector for the given time step - * This calls \e updateF for updating the transition matrix based on the real time step - * - * x_pred = F*x - * P_pred = F*P*trans(F) + Q - */ - CvMat *predict(unsigned long tick); - /** \brief Predict the Kalman state vector for the given time step and update the state using the Kalman gain. - * - Calls first the predict to ensure that the prediction is based on same timestep as the update - * - K = P_pred * trans(H) * inv(H*P_pred*trans(H) + R) - * - x = x_pred + K* ( z - H*x_pred) - * - P = (I - K*H) * P_pred - */ - CvMat *predict_update(KalmanSensor *sensor, unsigned long tick); - /** \brief Helper method. */ - double seconds_since_update(unsigned long tick); + /** \brief The error covariance matrix describing the accuracy of the state + * estimate */ + cv::Mat P; + /** \brief The covariance matrix for the process noise */ + cv::Mat Q; + /** \brief The predicted error covariance matrix */ + cv::Mat P_pred; + /** + * \brief Constructor + * \param n The number of items in the Kalman state vector + * \param _m The number of measurements given by this sensor + */ + Kalman(int _n); + /** \brief Destructor */ + ~Kalman(); + /** + * If your transition matrix F is based on time you need to override this + * method. 
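Written out with cv::Mat arithmetic, one linear predict/update cycle of the equations documented on predict() and predict_update() looks like this; a standalone sketch, not the class's actual implementation:

```
#include <opencv2/core.hpp>

// One linear Kalman cycle over state x and covariance P.
void kalman_step(cv::Mat& x, cv::Mat& P, const cv::Mat& F, const cv::Mat& Q,
                 const cv::Mat& H, const cv::Mat& R, const cv::Mat& z)
{
  // Predict: x_pred = F*x, P_pred = F*P*trans(F) + Q
  cv::Mat x_pred = F * x;
  cv::Mat P_pred = F * P * F.t() + Q;

  // Gain: K = P_pred * trans(H) * inv(H*P_pred*trans(H) + R)
  cv::Mat K = P_pred * H.t() * (H * P_pred * H.t() + R).inv();

  // Update: x = x_pred + K*(z - H*x_pred), P = (I - K*H)*P_pred
  x = x_pred + K * (z - H * x_pred);
  P = (cv::Mat::eye(P.size(), P.type()) - K * H) * P_pred;
}
```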
+ */ + virtual void update_F(unsigned long tick); + /** \brief Predict the Kalman state vector for the given time step + * This calls \e updateF for updating the transition matrix based on the real + * time step + * + * x_pred = F*x + * P_pred = F*P*trans(F) + Q + */ + cv::Mat& predict(unsigned long tick); + /** \brief Predict the Kalman state vector for the given time step and update + * the state using the Kalman gain. + * - Calls first the predict to ensure that the prediction is based on same + * timestep as the update + * - K = P_pred * trans(H) * inv(H*P_pred*trans(H) + R) + * - x = x_pred + K* ( z - H*x_pred) + * - P = (I - K*H) * P_pred + */ + cv::Mat& predict_update(KalmanSensor* sensor, unsigned long tick); + /** \brief Helper method. */ + double seconds_since_update(unsigned long tick); }; /** \brief Extended Kalman Filter (EKF) sensor implementation. -* -* Please override the pure virtual \e h() with the desired unlinear function. -* By default the \e upate_H calculates the Jacobian numerically, if you want other approach override -* also the \e update_H() -*/ -class ALVAR_EXPORT KalmanSensorEkf : public KalmanSensor { + * + * Please override the pure virtual \e h() with the desired unlinear function. + * By default the \e upate_H calculates the Jacobian numerically, if you want + * other approach override also the \e update_H() + */ +class ALVAR_EXPORT KalmanSensorEkf : public KalmanSensor +{ protected: - CvMat *delta; - CvMat *x_plus; - CvMat *x_minus; - CvMat *z_tmp1; - CvMat *z_tmp2; - virtual void h(CvMat *x_pred, CvMat *_z_pred) = 0; - virtual void update_H(CvMat *x_pred); - virtual void update_x(CvMat *x_pred, CvMat *x); + cv::Mat delta; + cv::Mat x_plus; + cv::Mat x_minus; + cv::Mat z_tmp1; + cv::Mat z_tmp2; + virtual void h(const cv::Mat& x_pred, const cv::Mat& _z_pred) = 0; + virtual void update_H(const cv::Mat& x_pred); + virtual void update_x(const cv::Mat& x_pred, cv::Mat& x); + public: - KalmanSensorEkf(const KalmanSensorEkf &k); - KalmanSensorEkf(int _n, int _m); - ~KalmanSensorEkf(); + KalmanSensorEkf(const KalmanSensorEkf& k); + KalmanSensorEkf(int _n, int _m); + ~KalmanSensorEkf(); }; /** \brief Extended Kalman Filter (EKF) implementation. -* -* Please override the pure virtual \e f() with the desired unlinear function. -* By default the \e upate_F calculates the Jacobian numerically, if you want other approach override -* also the \e update_F() -*/ -class ALVAR_EXPORT KalmanEkf : public Kalman { + * + * Please override the pure virtual \e f() with the desired unlinear function. + * By default the \e upate_F calculates the Jacobian numerically, if you want + * other approach override also the \e update_F() + */ +class ALVAR_EXPORT KalmanEkf : public Kalman +{ protected: - CvMat *delta; - CvMat *x_plus; - CvMat *x_minus; - CvMat *x_tmp1; - CvMat *x_tmp2; - virtual void f(CvMat *_x, CvMat *_x_pred, double dt) = 0; - virtual void update_F(unsigned long tick); - virtual void predict_x(unsigned long tick); + cv::Mat delta; + cv::Mat x_plus; + cv::Mat x_minus; + cv::Mat x_tmp1; + cv::Mat x_tmp2; + virtual void f(const cv::Mat& _x, const cv::Mat& _x_pred, double dt) = 0; + virtual void update_F(unsigned long tick); + virtual void predict_x(unsigned long tick); + public: - KalmanEkf(int _n); - ~KalmanEkf(); + KalmanEkf(int _n); + ~KalmanEkf(); }; /** \brief Class for visualizing Kalman filter Usage: \code - KalmanVisualize kvis(&kalman, &sensor); - ... 
-	kvis.update_pre();
-	kalman.predict_update(&sensor);
-	kvis.update_post();
-	kvis.show();
+  KalmanVisualize kvis(&kalman, &sensor);
+  ...
+  kvis.update_pre();
+  kalman.predict_update(&sensor);
+  kvis.update_post();
+  kvis.show();
 \endcode
 */
-class ALVAR_EXPORT KalmanVisualize {
-	int n;
-	int m;
-	KalmanCore *kalman;
-	KalmanSensorCore *sensor;
-	Kalman *kalman_ext;
-	KalmanSensor *sensor_ext;
-	/** \brief Image collecting visualization of the Kalman filter */
-	IplImage *img;
-	/** \brief Image to show */
-	IplImage *img_legend;
-	/** \brief Image to show */
-	IplImage *img_show;
-	/** \brief visualization scale before show */
-	int img_scale;
-	/** \brief Add matrix to the image */
-	void img_matrix(CvMat *mat, int top, int left);
-	/** \brief Init everything. Called from constructors. */
-	void Init();
+class ALVAR_EXPORT KalmanVisualize
+{
+  int n;
+  int m;
+  KalmanCore* kalman;
+  KalmanSensorCore* sensor;
+  Kalman* kalman_ext;
+  KalmanSensor* sensor_ext;
+  /** \brief Image collecting visualization of the Kalman filter */
+  cv::Mat img;
+  /** \brief Image to show */
+  cv::Mat img_legend;
+  /** \brief Image to show */
+  cv::Mat img_show;
+  /** \brief visualization scale before show */
+  int img_scale;
+  /** \brief Add matrix to the image */
+  void img_matrix(const cv::Mat& mat, int top, int left);
+  /** \brief Init everything. Called from constructors. */
+  void Init();
+
 public:
-	/** \brief Helper method for outputting matrices (for debug purposes) */
-	static void out_matrix(CvMat *m, char *name);
-	/** \brief Constructor for full Kalman implementation */
-	KalmanVisualize(Kalman *_kalman, KalmanSensor *_sensor);
-	/** \brief Constructor for core Kalman implementation (not all visualizations possible) */
-	KalmanVisualize(KalmanCore *_kalman, KalmanSensorCore *_sensor);
-	/** \brief Destructor */
-	~KalmanVisualize();
-	/** \brief Update the visualization image - call this before the Kalman's predict_update */
-	void update_pre();
-	/** \brief Update the visualization image - call this after the Kalman's predict_update */
-	void update_post();
-	/** \brief Show the genrated visualization image */
-	void show();
+  /** \brief Helper method for outputting matrices (for debug purposes) */
+  static void out_matrix(const cv::Mat& m, char* name);
+  /** \brief Constructor for full Kalman implementation */
+  KalmanVisualize(Kalman* _kalman, KalmanSensor* _sensor);
+  /** \brief Constructor for core Kalman implementation (not all visualizations
+   * possible) */
+  KalmanVisualize(KalmanCore* _kalman, KalmanSensorCore* _sensor);
+  /** \brief Destructor */
+  ~KalmanVisualize();
+  /** \brief Update the visualization image - call this before the Kalman's
+   * predict_update */
+  void update_pre();
+  /** \brief Update the visualization image - call this after the Kalman's
+   * predict_update */
+  void update_post();
+  /** \brief Show the generated visualization image */
+  void show();
 };

-} // namespace alvar
+}  // namespace alvar

 #endif
diff --git a/ar_track_alvar/include/ar_track_alvar/Line.h b/ar_track_alvar/include/ar_track_alvar/Line.h
index d93ac4d..4acf004 100644
--- a/ar_track_alvar/include/ar_track_alvar/Line.h
+++ b/ar_track_alvar/include/ar_track_alvar/Line.h
@@ -33,42 +33,46 @@
 #include "Alvar.h"
 #include "Util.h"

-namespace alvar {
-
+namespace alvar
+{
 /**
- * \brief Struct representing a line. The line is parametrized by its center and direction vector.
+ * \brief Struct representing a line. The line is parametrized by its center and
+ * direction vector.
*/ struct ALVAR_EXPORT Line { - /** - * \brief Constructor. - * \param params params[0] and params[1] are the x and y components of the direction vector, params[2] and params[3] are the x and y coordinates of the line center. - */ - Line(float params[4]); - Line() - {} - - /** - * \brief Line center. - */ - PointDouble c; // center - /** - * \brief Direction vector. - */ - PointDouble s; // direction vector + /** + * \brief Constructor. + * \param params params[0] and params[1] are the x and y components of the + * direction vector, params[2] and params[3] are the x and y coordinates of + * the line center. + */ + Line(const cv::Vec4f& params); + Line() + { + } + + /** + * \brief Line center. + */ + PointDouble c; // center + /** + * \brief Direction vector. + */ + PointDouble s; // direction vector }; /** - * \brief Fit lines to vector of points. + * \brief Fit lines to vector of points. * \param lines Resulting set of lines. * \param corners Index list of line breaks. * \param edge Vector of points (pixels) where the line is fitted. - * \param grey In the future, we may want to fit lines directly to grayscale image instead of thresholded edge. + * \param grey In the future, we may want to fit lines directly to grayscale + * image instead of thresholded edge. */ -int ALVAR_EXPORT FitLines(std::vector &lines, - const std::vector& corners, - const std::vector& edge, - IplImage *grey=0); +int ALVAR_EXPORT FitLines(std::vector& lines, + const std::vector& corners, + const std::vector& edge); /** * \brief Calculates an intersection point of two lines. @@ -78,15 +82,6 @@ int ALVAR_EXPORT FitLines(std::vector &lines, */ PointDouble ALVAR_EXPORT Intersection(const Line& l1, const Line& l2); -} // namespace alvar +} // namespace alvar #endif - - - - - - - - - diff --git a/ar_track_alvar/include/ar_track_alvar/Lock.h b/ar_track_alvar/include/ar_track_alvar/Lock.h index 546b780..ac5d184 100644 --- a/ar_track_alvar/include/ar_track_alvar/Lock.h +++ b/ar_track_alvar/include/ar_track_alvar/Lock.h @@ -34,8 +34,8 @@ #include "Mutex.h" #include "Uncopyable.h" -namespace alvar { - +namespace alvar +{ /** * \brief Lock for simplifying mutex handling. * @@ -46,29 +46,28 @@ namespace alvar { class ALVAR_EXPORT Lock : private Uncopyable { public: - /** - * \brief Constructor. - * - * \param mutex The mutex to lock. - */ - Lock(Mutex *mutex) - : mMutex(mutex) - { - mMutex->lock(); - } + /** + * \brief Constructor. + * + * \param mutex The mutex to lock. + */ + Lock(Mutex* mutex) : mMutex(mutex) + { + mMutex->lock(); + } - /** - * \brief Destructor. - */ - ~Lock() - { - mMutex->unlock(); - } + /** + * \brief Destructor. + */ + ~Lock() + { + mMutex->unlock(); + } private: - Mutex *mMutex; + Mutex* mMutex; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Marker.h b/ar_track_alvar/include/ar_track_alvar/Marker.h index 0313833..f0992c5 100644 --- a/ar_track_alvar/include/ar_track_alvar/Marker.h +++ b/ar_track_alvar/include/ar_track_alvar/Marker.h @@ -41,305 +41,399 @@ #include #include "filter/kinect_filtering.h" #include +#include -namespace alvar { +namespace alvar +{ +/** + * \brief Basic 2D \e Marker functionality. + * + * This class contains the basic \e Marker functionality for planar markers. 
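Given the center-plus-direction parametrization above, the Intersection() declared in this header reduces to a 2x2 linear solve; a self-contained sketch of that computation (not the library's code):

```
#include <cmath>

struct Vec2 { double x, y; };

// Intersect p(t) = c1 + t*s1 with q(u) = c2 + u*s2 via Cramer's rule.
// Returns false when the directions are (near) parallel.
bool intersect(Vec2 c1, Vec2 s1, Vec2 c2, Vec2 s2, Vec2& out)
{
  double det = s2.x * s1.y - s1.x * s2.y;
  if (std::fabs(det) < 1e-12)
    return false;
  double dx = c2.x - c1.x, dy = c2.y - c1.y;
  double t = (s2.x * dy - s2.y * dx) / det;
  out = { c1.x + t * s1.x, c1.y + t * s1.y };
  return true;
}
```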
+ */ +class ALVAR_EXPORT Marker +{ +protected: + void VisualizeMarkerPose(cv::Mat& image, Camera* cam, + double visualize2d_points[12][2], + const cv::Scalar& color = CV_RGB(255, 0, 0)) const; + virtual void VisualizeMarkerContent(cv::Mat& image, Camera* cam, + double datatext_point[2], + double content_point[2]) const; + virtual void VisualizeMarkerError(cv::Mat& image, Camera* cam, + double errortext_point[2]) const; + bool UpdateContentBasic(std::vector& _marker_corners_img, + cv::Mat& gray, Camera* cam, int frame_no = 0); - /** - * \brief Basic 2D \e Marker functionality. +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + bool valid; + + /** \brief Compares the marker corners with the previous match. * - * This class contains the basic \e Marker functionality for planar markers. + * In some cases the tracking of the marker can be accepted solely based on + * this. Returns the marker orientation and an error value describing the + * pixel error relative to the marker diameter. + */ + void CompareCorners(std::vector& _marker_corners_img, + int* orientation, double* error); + /** \brief Compares the marker corners with the previous match. + */ + void CompareContent(std::vector& _marker_corners_img, + cv::Mat& gray, Camera* cam, int* orientation) const; + /** \brief Updates the \e marker_content from the image using \e Homography + */ + virtual bool UpdateContent(std::vector& _marker_corners_img, + cv::Mat& gray, Camera* cam, int frame_no = 0); + /** \brief Updates the markers \e pose estimation + */ + void UpdatePose(std::vector& _marker_corners_img, Camera* cam, + int orientation, int frame_no = 0, bool update_pose = true); + /** \brief Decodes the marker content. Please call \e UpdateContent before + * this. This virtual method is meant to be implemented by heirs. + */ + virtual bool DecodeContent(int* orientation); + + /** \brief Returns the content as a matrix */ - class ALVAR_EXPORT Marker + cv::Mat GetContent() const { - protected: - void VisualizeMarkerPose(IplImage *image, Camera *cam, double visualize2d_points[12][2], CvScalar color=CV_RGB(255,0,0)) const; - virtual void VisualizeMarkerContent(IplImage *image, Camera *cam, double datatext_point[2], double content_point[2]) const; - virtual void VisualizeMarkerError(IplImage *image, Camera *cam, double errortext_point[2]) const; - bool UpdateContentBasic(std::vector > &_marker_corners_img, IplImage *gray, Camera *cam, int frame_no = 0); - - public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - bool valid; - - /** \brief Compares the marker corners with the previous match. - * - * In some cases the tracking of the marker can be accepted solely based on this. - * Returns the marker orientation and an error value describing the pixel error - * relative to the marker diameter. - */ - void CompareCorners(std::vector > &_marker_corners_img, int *orientation, double *error); - /** \brief Compares the marker corners with the previous match. - */ - void CompareContent(std::vector > &_marker_corners_img, IplImage *gray, Camera *cam, int *orientation) const; - /** \brief Updates the \e marker_content from the image using \e Homography - */ - virtual bool UpdateContent(std::vector > &_marker_corners_img, IplImage *gray, Camera *cam, int frame_no = 0); - /** \brief Updates the markers \e pose estimation - */ - void UpdatePose(std::vector > &_marker_corners_img, Camera *cam, int orientation, int frame_no = 0, bool update_pose = true); - /** \brief Decodes the marker content. Please call \e UpdateContent before this. 
- * This virtual method is meant to be implemented by heirs. - */ - virtual bool DecodeContent(int *orientation); - - /** \brief Returns the content as a matrix - */ - CvMat *GetContent() const { - return marker_content; + return marker_content; + } + /** \brief Saves the marker as an image + */ + void SaveMarkerImage(const char* filename, int save_res = 0) const; + /** \brief Draw the marker filling the ROI in the given image + */ + void ScaleMarkerToImage(cv::Mat& image) const; + /** \brief Visualize the marker + */ + void Visualize(cv::Mat& image, Camera* cam, + const cv::Scalar& color = CV_RGB(255, 0, 0)) const; + /** \brief Method for resizing the marker dimensions */ + void SetMarkerSize(double _edge_length = 0, int _res = 0, double _margin = 0); + /** \brief Get edge length (to support different size markers */ + double GetMarkerEdgeLength() const + { + return edge_length; + } + /** \brief Destructor */ + ~Marker(); + /** \brief Default constructor + * \param _edge_length Length of the marker's edge in whatever units you are + * using (e.g. cm) \param _res The marker content resolution in pixels (this + * is actually \param _margin The marker margin resolution in pixels (The + * actual captured marker image has pixel resolution of _margin+_res+_margin) + */ + Marker(double _edge_length = 0, int _res = 0, double _margin = 0); + /** \brief Copy constructor */ + Marker(const Marker& m); + /** \brief Get id for this marker + * This is used e.g. in MarkerDetector to associate a marker id with + * an appropriate edge length. This method should be overwritten + * to return a suitable identification number for each marker type. + */ + virtual unsigned long GetId() const + { + return 0; + } + virtual void SetId(unsigned long _id){}; + + /** + * Returns the resolution (the number of square rows and columns) of the + * marker content area. The total number of content squares within the + * content area is resolution*resolution. + */ + int GetRes() const + { + return res; + } + + /** + * Returns the margin thickness, that is, the number of rows or columns of + * black squares surrounding the content area. + */ + double GetMargin() const + { + return margin; + } + + /** \brief The current marker \e Pose + */ + Pose pose; + /** \brief Get marker detection error estimate + * \param errors Flags indicating what error elements are combined + * The marker detection error can consist of several elements: + * MARGIN_ERROR is updated in \e UpdateContent and it indicates erroneous + * values inside the marginal area. DECODE_ERROR is updated in \e + * DecodeContent and it indicates erroneous values inside the actual marker + * content area. 
TRACK_ERROR is updated in \e MarkerDetector.Detect and it + * indicates the amount of tracking error returned from \e CompareCorners + */ + double GetError(int errors = (MARGIN_ERROR | DECODE_ERROR)) const + { + int count = 0; + double error = 0; + if (errors & MARGIN_ERROR) + { + error += margin_error; + count++; } - /** \brief Saves the marker as an image - */ - void SaveMarkerImage(const char *filename, int save_res = 0) const; - /** \brief Draw the marker filling the ROI in the given image - */ - void ScaleMarkerToImage(IplImage *image) const; - /** \brief Visualize the marker - */ - void Visualize(IplImage *image, Camera *cam, CvScalar color=CV_RGB(255,0,0)) const; - /** \brief Method for resizing the marker dimensions */ - void SetMarkerSize(double _edge_length = 0, int _res = 0, double _margin = 0); - /** \brief Get edge length (to support different size markers */ - double GetMarkerEdgeLength() const { return edge_length; } - /** \brief Destructor */ - ~Marker(); - /** \brief Default constructor - * \param _edge_length Length of the marker's edge in whatever units you are using (e.g. cm) - * \param _res The marker content resolution in pixels (this is actually - * \param _margin The marker margin resolution in pixels (The actual captured marker image has pixel resolution of _margin+_res+_margin) - */ - Marker(double _edge_length = 0, int _res = 0, double _margin = 0); - /** \brief Copy constructor */ - Marker(const Marker& m); - /** \brief Get id for this marker - * This is used e.g. in MarkerDetector to associate a marker id with - * an appropriate edge length. This method should be overwritten - * to return a suitable identification number for each marker type. - */ - virtual unsigned long GetId() const { return 0; } - virtual void SetId(unsigned long _id) {}; - - /** - * Returns the resolution (the number of square rows and columns) of the - * marker content area. The total number of content squares within the - * content area is resolution*resolution. - */ - int GetRes() const { return res; } - - /** - * Returns the margin thickness, that is, the number of rows or columns of - * black squares surrounding the content area. - */ - double GetMargin() const { return margin; } - - /** \brief The current marker \e Pose - */ - Pose pose; - /** \brief Get marker detection error estimate - * \param errors Flags indicating what error elements are combined - * The marker detection error can consist of several elements: - * MARGIN_ERROR is updated in \e UpdateContent and it indicates erroneous values inside the marginal area. - * DECODE_ERROR is updated in \e DecodeContent and it indicates erroneous values inside the actual marker content area. 
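Selecting which components GetError() averages is just a matter of OR-ing the flags above; a short usage sketch (the thresholds are illustrative only):

```
#include "ar_track_alvar/Marker.h"

// Combined margin+decode error (the default) versus tracking error alone.
bool looks_reliable(const alvar::Marker& m)
{
  double combined = m.GetError();  // MARGIN_ERROR | DECODE_ERROR
  double tracking = m.GetError(alvar::Marker::TRACK_ERROR);
  return combined < 0.1 && tracking < 0.2;  // made-up thresholds
}
```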
- * TRACK_ERROR is updated in \e MarkerDetector.Detect and it indicates the amount of tracking error returned from \e CompareCorners - */ - double GetError(int errors = (MARGIN_ERROR | DECODE_ERROR)) const { - int count = 0; - double error = 0; - if (errors & MARGIN_ERROR) {error+=margin_error; count++;} - if (errors & DECODE_ERROR) {error+=decode_error; count++;} - if (errors & TRACK_ERROR) {error+=track_error; count++;} - return error/count; + if (errors & DECODE_ERROR) + { + error += decode_error; + count++; } - /** \brief Set the marker error estimate */ - void SetError(int error_type, double value) { - if (error_type == MARGIN_ERROR) margin_error = value; - else if (error_type == DECODE_ERROR) decode_error = value; - else if (error_type == TRACK_ERROR) track_error = value; + if (errors & TRACK_ERROR) + { + error += track_error; + count++; } - static const int MARGIN_ERROR=1; - static const int DECODE_ERROR=2; - static const int TRACK_ERROR=4; - protected: - double margin_error; - double decode_error; - double track_error; - double edge_length; - int res; - double margin; - CvMat *marker_content; - - public: - - /** \brief Marker color points in marker coordinates */ - std::vector marker_points; - /** \brief Marker corners in marker coordinates */ - std::vector marker_corners; - /** \brief Marker corners in image coordinates */ - std::vector marker_corners_img; - /** \brief Marker points in image coordinates */ - std::vector ros_marker_points_img; - ar_track_alvar::ARCloud ros_corners_3D; - int ros_orientation; - /** \brief Samples to be used in figuring out min/max for thresholding */ - std::vector marker_margin_w; - /** \brief Samples to be used in figuring out min/max for thresholding */ - std::vector marker_margin_b; + return error / count; + } + /** \brief Set the marker error estimate */ + void SetError(int error_type, double value) + { + if (error_type == MARGIN_ERROR) + margin_error = value; + else if (error_type == DECODE_ERROR) + decode_error = value; + else if (error_type == TRACK_ERROR) + track_error = value; + } + static const int MARGIN_ERROR = 1; + static const int DECODE_ERROR = 2; + static const int TRACK_ERROR = 4; + +protected: + double margin_error; + double decode_error; + double track_error; + double edge_length; + int res; + double margin; + cv::Mat marker_content; + +public: + /** \brief Marker color points in marker coordinates */ + std::vector marker_points; + /** \brief Marker corners in marker coordinates */ + std::vector marker_corners; + /** \brief Marker corners in image coordinates */ + std::vector marker_corners_img; + /** \brief Marker points in image coordinates */ + std::vector ros_marker_points_img; + ar_track_alvar::ARCloud ros_corners_3D; + int ros_orientation; + /** \brief Samples to be used in figuring out min/max for thresholding */ + std::vector marker_margin_w; + /** \brief Samples to be used in figuring out min/max for thresholding */ + std::vector marker_margin_b; #ifdef VISUALIZE_MARKER_POINTS - std::vector marker_allpoints_img; + std::vector marker_allpoints_img; #endif - }; +}; - /** - * \brief \e MarkerArtoolkit for using matrix markers similar with the ones used in ARToolkit. - */ - class ALVAR_EXPORT MarkerArtoolkit : public Marker +/** + * \brief \e MarkerArtoolkit for using matrix markers similar with the ones used + * in ARToolkit. 
+ */ +class ALVAR_EXPORT MarkerArtoolkit : public Marker +{ +protected: + int default_res() + { + std::cout << "MarkerArtoolkit::default_res" << std::endl; + return 3; + } + double default_margin() { - protected: - int default_res() { std::cout<<"MarkerArtoolkit::default_res"<& _marker_corners_img, + cv::Mat& gray, Camera* cam); + +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + static const int MAX_MARKER_STRING_LEN = 2048; + enum MarkerContentType { - protected: - virtual void VisualizeMarkerContent(IplImage *image, Camera *cam, double datatext_point[2], double content_point[2]) const; - void DecodeOrientation(int *error, int *total, int *orientation); - int DecodeCode(int orientation, BitsetExt *bs, int *erroneous, int *total, unsigned char* content_type); - void Read6bitStr(BitsetExt *bs, char *s, size_t s_max_len); - void Add6bitStr(BitsetExt *bs, char *s); - int UsableDataBits(int marker_res, int hamming); - bool DetectResolution(std::vector > &_marker_corners_img, IplImage *gray, Camera *cam); - - public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - static const int MAX_MARKER_STRING_LEN=2048; - enum MarkerContentType { - MARKER_CONTENT_TYPE_NUMBER, - MARKER_CONTENT_TYPE_STRING, - MARKER_CONTENT_TYPE_FILE, - MARKER_CONTENT_TYPE_HTTP - }; - unsigned char content_type; - - /** \brief \e MarkerData content can be presented either as number (\e MARKER_CONTENT_TYPE_NUMBER) or string */ - union { - unsigned long id; - char str[MAX_MARKER_STRING_LEN]; - } data; - - /** \brief Default constructor - * \param _edge_length Length of the marker's edge in whatever units you are using (e.g. cm) - * \param _res The marker content resolution in pixels (this is actually - * \param _margin The marker margin resolution in pixels (The actual captured marker image has pixel resolution of _margin+_res+_margin) - */ - MarkerData(double _edge_length = 0, int _res = 0, double _margin = 0) : - Marker(_edge_length, _res, (_margin?_margin:2)) - { - } - /** \brief Get ID for recognizing this marker */ - unsigned long GetId() const { return data.id; } - /** \brief Set the ID */ - void SetId(unsigned long _id) { data.id = _id; } - /** \brief Updates the \e marker_content from the image using \e Homography - * Compared to the basic implementation in \e Marker this will also detect the marker - * resolution automatically when the marker resolution is specified to be 0. - */ - virtual bool UpdateContent(std::vector > &_marker_corners_img, IplImage *gray, Camera *cam, int frame_no = 0); - /** \brief \e DecodeContent should be called after \e UpdateContent to fill \e content_type, \e decode_error and \e data - */ - bool DecodeContent(int *orientation); - /** \brief Updates the \e marker_content by "encoding" the given parameters - */ - void SetContent(MarkerContentType content_type, unsigned long id, const char *str, bool force_strong_hamming=false, bool verbose=false); + MARKER_CONTENT_TYPE_NUMBER, + MARKER_CONTENT_TYPE_STRING, + MARKER_CONTENT_TYPE_FILE, + MARKER_CONTENT_TYPE_HTTP }; + unsigned char content_type; - /** \brief Iterator type for traversing templated Marker vector without the template. 
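Tying the MarkerData pieces above together, generating a marker image could look like this; a sketch, assuming the `ar_track_alvar/Marker.h` include path:

```
#include "ar_track_alvar/Marker.h"

// Encode id 42 into a marker with edge length 9 units, content
// resolution 5 and margin 2, then write it out as an image.
void make_marker()
{
  alvar::MarkerData marker(9.0, 5, 2);
  marker.SetContent(alvar::MarkerData::MARKER_CONTENT_TYPE_NUMBER, 42, nullptr);
  marker.SaveMarkerImage("marker_42.png");
}
```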
-	 */
-	class ALVAR_EXPORT MarkerIterator : public std::iterator<std::forward_iterator_tag, Marker*> {
-	public:
-		virtual bool operator==(const MarkerIterator& other) = 0;
-		virtual bool operator!=(const MarkerIterator& other) = 0;
-		virtual MarkerIterator& operator++() = 0;
-		virtual const Marker* operator*() = 0;
-		virtual const Marker* operator->() = 0;
-		virtual MarkerIterator& reset() = 0;
-
-		void *_data;
-	};
+  /** \brief \e MarkerData content can be presented either as number (\e
+   * MARKER_CONTENT_TYPE_NUMBER) or string */
+  union
+  {
+    unsigned long id;
+    char str[MAX_MARKER_STRING_LEN];
+  } data;

-	/** \brief Iterator implementation for traversing templated Marker vector
-	 * without the template.
-	 * \param T T extends Marker
+  /** \brief Default constructor
+   * \param _edge_length Length of the marker's edge in whatever units you are
+   * using (e.g. cm) \param _res The marker content resolution in pixels (this
+   * is actually \param _margin The marker margin resolution in pixels (The
+   * actual captured marker image has pixel resolution of _margin+_res+_margin)
    */
-	template <typename T>
-	class ALVAR_EXPORT MarkerIteratorImpl : public MarkerIterator {
-	public:
-		EIGEN_MAKE_ALIGNED_OPERATOR_NEW
-		typedef typename std::vector<T, Eigen::aligned_allocator<T> >::const_iterator const_iterator_aligntype;
-		MarkerIteratorImpl(const_iterator_aligntype i) : _begin(i), _i(i) {
-			_data = this;
-		}
+  MarkerData(double _edge_length = 0, int _res = 0, double _margin = 0)
+    : Marker(_edge_length, _res, (_margin ? _margin : 2))
+  {
+  }
+  /** \brief Get ID for recognizing this marker */
+  unsigned long GetId() const
+  {
+    return data.id;
+  }
+  /** \brief Set the ID */
+  void SetId(unsigned long _id)
+  {
+    data.id = _id;
+  }
+  /** \brief Updates the \e marker_content from the image using \e Homography
+   * Compared to the basic implementation in \e Marker this will also detect the
+   * marker resolution automatically when the marker resolution is specified to
+   * be 0.
+   */
+  virtual bool UpdateContent(std::vector<PointDouble>& _marker_corners_img,
+                             cv::Mat& gray, Camera* cam, int frame_no = 0);
+  /** \brief \e DecodeContent should be called after \e UpdateContent to fill \e
+   * content_type, \e decode_error and \e data
+   */
+  bool DecodeContent(int* orientation);
+  /** \brief Updates the \e marker_content by "encoding" the given parameters
+   */
+  void SetContent(MarkerContentType content_type, unsigned long id,
+                  const char* str, bool force_strong_hamming = false,
+                  bool verbose = false);
+};

+/** \brief Iterator type for traversing templated Marker vector without the
+ * template.
+ */
+class ALVAR_EXPORT MarkerIterator
+    : public std::iterator<std::forward_iterator_tag, Marker*>
+{
+public:
+  virtual bool operator==(const MarkerIterator& other) = 0;
+  virtual bool operator!=(const MarkerIterator& other) = 0;
+  virtual MarkerIterator& operator++() = 0;
+  virtual const Marker* operator*() = 0;
+  virtual const Marker* operator->() = 0;
+  virtual MarkerIterator& reset() = 0;

-	void *_data;
-	};
+  void* _data;
+};
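
The type-erased \e MarkerIterator lets non-template code such as \e MultiMarker walk any concrete marker vector. A small sketch of wrapping a vector with \e MarkerIteratorImpl (everything except the ALVAR types is illustrative):

```
#include <iostream>
#include <Eigen/StdVector>
#include "ar_track_alvar/Marker.h"

// Sketch: print every marker's error without knowing the concrete type here.
void print_errors(
    const std::vector<alvar::MarkerData,
                      Eigen::aligned_allocator<alvar::MarkerData> >& markers)
{
  alvar::MarkerIteratorImpl<alvar::MarkerData> it(markers.begin());
  alvar::MarkerIteratorImpl<alvar::MarkerData> end(markers.end());
  for (; it != end; ++it)
    std::cout << (*it)->GetError() << std::endl;  // access via const Marker*
}
```
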
+/** \brief Iterator implementation for traversing templated Marker vector
+ * without the template.
+ * \param T T extends Marker
+ */
+template <typename T>
+class ALVAR_EXPORT MarkerIteratorImpl : public MarkerIterator
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+  typedef typename std::vector<T, Eigen::aligned_allocator<T> >::const_iterator
+      const_iterator_aligntype;
+  MarkerIteratorImpl(const_iterator_aligntype i) : _begin(i), _i(i)
+  {
+    _data = this;
+  }

-	~MarkerIteratorImpl() {}
+  ~MarkerIteratorImpl()
+  {
+  }

-	// The assignment and relational operators are straightforward
-	MarkerIteratorImpl& operator=(const MarkerIteratorImpl& other) {
-		_i = other._i;
-		return(*this);
-	}
+  // The assignment and relational operators are straightforward
+  MarkerIteratorImpl& operator=(const MarkerIteratorImpl& other)
+  {
+    _i = other._i;
+    return (*this);
+  }

-	bool operator==(const MarkerIterator& other) {
-		MarkerIteratorImpl* pother = (MarkerIteratorImpl*)other._data;
-		return (_i == pother->_i);
-	}
+  bool operator==(const MarkerIterator& other)
+  {
+    MarkerIteratorImpl* pother = (MarkerIteratorImpl*)other._data;
+    return (_i == pother->_i);
+  }

-	bool operator!=(const MarkerIterator& other) {
-		MarkerIteratorImpl* pother = (MarkerIteratorImpl*)other._data;
-		return (_i != pother->_i);
-	}
+  bool operator!=(const MarkerIterator& other)
+  {
+    MarkerIteratorImpl* pother = (MarkerIteratorImpl*)other._data;
+    return (_i != pother->_i);
+  }

-	MarkerIterator& operator++() {
-		_i++;
-		return(*this);
-	}
+  MarkerIterator& operator++()
+  {
+    _i++;
+    return (*this);
+  }

-	const Marker* operator*() {
-		return &(*_i);
-	}
+  const Marker* operator*()
+  {
+    return &(*_i);
+  }

-	const Marker* operator->() {
-		return &(*_i);
-	}
+  const Marker* operator->()
+  {
+    return &(*_i);
+  }

-	MarkerIterator& reset() {
-		_i = _begin;
-		return (*this);
-	}
+  MarkerIterator& reset()
+  {
+    _i = _begin;
+    return (*this);
+  }

-	private:
-	const_iterator_aligntype _begin;
-	const_iterator_aligntype _i;
-	};
+private:
+  const_iterator_aligntype _begin;
+  const_iterator_aligntype _i;
+};

-} // namespace alvar
+}  // namespace alvar

 #endif
diff --git a/ar_track_alvar/include/ar_track_alvar/MarkerDetector.h b/ar_track_alvar/include/ar_track_alvar/MarkerDetector.h
index c00a2ad..5f8f816 100644
--- a/ar_track_alvar/include/ar_track_alvar/MarkerDetector.h
+++ b/ar_track_alvar/include/ar_track_alvar/MarkerDetector.h
@@ -46,135 +46,166 @@ using std::rotate;
 #include
 #include

-namespace alvar {
-
+namespace alvar
+{
 /**
- * \brief Templateless version of MarkerDetector. Please use MarkerDetector instead.
+ * \brief Templateless version of MarkerDetector. Please use MarkerDetector
+ * instead.
 */
-class ALVAR_EXPORT MarkerDetectorImpl {
+class ALVAR_EXPORT MarkerDetectorImpl
+{
 protected:
-	virtual Marker* new_M(double _edge_length = 0, int _res = 0, double _margin = 0) = 0;
-	virtual void _markers_clear() = 0;
-	virtual void _markers_push_back(Marker *mn) = 0;
-	virtual size_t _markers_size() = 0;
-	virtual void _track_markers_clear() = 0;
-	virtual void _track_markers_push_back(Marker *mn) = 0;
-	virtual size_t _track_markers_size() = 0;
-	virtual Marker* _track_markers_at(size_t i) = 0;
-	virtual void _swap_marker_tables() = 0;
-
-	Labeling* labeling;
-
-	std::map<unsigned long, double> map_edge_length;
-	double edge_length;
-	int res;
-	double margin;
-	bool detect_pose_grayscale;
-
-	MarkerDetectorImpl();
-	virtual ~MarkerDetectorImpl();
+  virtual Marker* new_M(double _edge_length = 0, int _res = 0,
+                        double _margin = 0) = 0;
+  virtual void _markers_clear() = 0;
+  virtual void _markers_push_back(Marker* mn) = 0;
+  virtual size_t _markers_size() = 0;
+  virtual void _track_markers_clear() = 0;
+  virtual void _track_markers_push_back(Marker* mn) = 0;
+  virtual size_t _track_markers_size() = 0;
+  virtual Marker* _track_markers_at(size_t i) = 0;
+  virtual void _swap_marker_tables() = 0;
+
+  Labeling* labeling;
+
+  std::map<unsigned long, double> map_edge_length;
+  double edge_length;
+  int res;
+  double margin;
+  bool detect_pose_grayscale;
+
+  MarkerDetectorImpl();
+  virtual ~MarkerDetectorImpl();

 public:
-	/** \brief Clear the markers that are tracked */
-	void TrackMarkersReset();
-
-	/** \brief Add markers to be tracked
-	 * Sometimes application or e.g. the \e MultiMarker implementation knows
-	 * more about marker locations. Then this method can be used after \e Detect
-	 * to indicate where additional trackable markers could be found. The
-	 * \e DetectAdditional is called for tracking these.
-	 */
-	void TrackMarkerAdd(int id, PointDouble corners[4]);
-
-	/** Set the default marker size to be used for all markers unless
-	 * \param _edge_length Length of the marker's edge in whatever units you are using (e.g. cm)
-	 * \param _res The marker content resolution in pixels. By default we use 5x5 markers. If you use 0 with \e MarkerData, the marker resolution is detected automatically.
-	 * \param _margin The marker margin resolution in pixels (The actual captured marker image has pixel resolution of _margin+_res+_margin)
-	 *
-	 * \note The default marker content resolution (_res) of 5 can only detect marker ids from 0 to 255. For larger marker ids, you need to increase the marker content resolution accordingly.
-	 */
-	void SetMarkerSize(double _edge_length = 1, int _res = 5, double _margin = 2);
-
-	/** Set marker size for specified marker id. This needs to be called after setting the default marker size.
-	 * \param id The specified marker id
-	 * \param _edge_length Length of the marker's edge in whatever units you are using (e.g. cm)
-	 */
-	void SetMarkerSizeForId(unsigned long id, double _edge_length = 1);
-
-	/** Set marker size for specified marker id. This needs to be called after setting the default marker size.
-	 * \param _detect_pose_grayscale Do we detect marker pose using grayscale optimization?
-	 */
-	void SetOptions(bool _detect_pose_grayscale=false);
-
-	/**
-	 * \brief \e Detect \e Marker 's from \e image
-	 *
-	 * The coordinates are little tricky. Here is a short summary.
-	 *
-	 * - Image (top-left origin).
-	 * - The marker corners in the image are searched in sub-pixel accuracy in
-	     counter-clockwise order starting from "lower-left" corner.
-	 * - The corresponding marker corners and marker points are in marker coordinates
-	 *   (x is to east, y is to north, and z is up from the marker)
-	 * - The marker points are read from inside the margins starting from top-left
-	 *   and reading the bits first left-to-right one line at a time.
-	 */
-	int Detect(IplImage *image,
-	           Camera *cam,
-	           bool track=false,
-	           bool visualize=false,
-	           double max_new_marker_error=0.08,
-	           double max_track_error=0.2,
-	           LabelingMethod labeling_method=CVSEQ,
-	           bool update_pose=true);
-
-	int DetectAdditional(IplImage *image, Camera *cam, bool visualize=false, double max_track_error=0.2);
+  /** \brief Clear the markers that are tracked */
+  void TrackMarkersReset();
+
+  /** \brief Add markers to be tracked
+   * Sometimes application or e.g. the \e MultiMarker implementation knows
+   * more about marker locations. Then this method can be used after \e Detect
+   * to indicate where additional trackable markers could be found. The
+   * \e DetectAdditional is called for tracking these.
+   */
+  void TrackMarkerAdd(int id, PointDouble corners[4]);
+
+  /** Set the default marker size to be used for all markers unless
+   * \param _edge_length Length of the marker's edge in whatever units you are
+   * using (e.g. cm) \param _res The marker content resolution in pixels. By
+   * default we use 5x5 markers. If you use 0 with \e MarkerData, the marker
+   * resolution is detected automatically. \param _margin The marker margin
+   * resolution in pixels (The actual captured marker image has pixel resolution
+   * of _margin+_res+_margin)
+   *
+   * \note The default marker content resolution (_res) of 5 can only detect
+   * marker ids from 0 to 255. For larger marker ids, you need to increase the
+   * marker content resolution accordingly.
+   */
+  void SetMarkerSize(double _edge_length = 1, int _res = 5, double _margin = 2);
+
+  /** Set marker size for specified marker id. This needs to be called after
+   * setting the default marker size. \param id The specified marker id \param
+   * _edge_length Length of the marker's edge in whatever units you are using
+   * (e.g. cm)
+   */
+  void SetMarkerSizeForId(unsigned long id, double _edge_length = 1);
+
+  /** Set marker size for specified marker id. This needs to be called after
+   * setting the default marker size. \param _detect_pose_grayscale Do we detect
+   * marker pose using grayscale optimization?
+   */
+  void SetOptions(bool _detect_pose_grayscale = false);
+
+  /**
+   * \brief \e Detect \e Marker 's from \e image
+   *
+   * The coordinates are a little tricky. Here is a short summary.
+   *
+   * - Image (top-left origin).
+   * - The marker corners in the image are searched in sub-pixel accuracy in
+   *   counter-clockwise order starting from "lower-left" corner.
+   * - The corresponding marker corners and marker points are in marker
+   * coordinates (x is to east, y is to north, and z is up from the marker)
+   * - The marker points are read from inside the margins starting from top-left
+   *   and reading the bits first left-to-right one line at a time.
+   */
+  int Detect(cv::Mat& image, Camera* cam, bool track = false,
+             bool visualize = false, double max_new_marker_error = 0.08,
+             double max_track_error = 0.2,
+             LabelingMethod labeling_method = CVSEQ, bool update_pose = true);
+
+  int DetectAdditional(cv::Mat& image, Camera* cam, bool visualize = false,
+                       double max_track_error = 0.2);
 };

 /**
  * \brief \e MarkerDetector for detecting markers of type \e M
  * \param M Class that extends \e Marker
  */
-template<class M>
+template <class M>
 class ALVAR_EXPORT MarkerDetector : public MarkerDetectorImpl
 {
 protected:
-	Marker* new_M(double _edge_length = 0, int _res = 0, double _margin = 0) {
+  Marker* new_M(double _edge_length = 0, int _res = 0, double _margin = 0)
+  {
     return new M(_edge_length, _res, _margin);
   }

-	void _markers_clear() { markers->clear(); }
-	void _markers_push_back(Marker *mn) { markers->push_back(*((M*)mn)); }
-	size_t _markers_size() { return markers->size(); }
-	void _track_markers_clear() { track_markers->clear(); }
-	void _track_markers_push_back(Marker *mn) { track_markers->push_back(*((M*)mn)); }
-	size_t _track_markers_size() { return track_markers->size(); }
-	Marker* _track_markers_at(size_t i) { return &track_markers->at(i); }
-
-	void _swap_marker_tables() {
-		std::vector<M, Eigen::aligned_allocator<M> > *tmp_markers = markers;
-		markers = track_markers;
-		track_markers = tmp_markers;
+  void _markers_clear()
+  {
+    markers->clear();
+  }
+  void _markers_push_back(Marker* mn)
+  {
+    markers->push_back(*((M*)mn));
+  }
+  size_t _markers_size()
+  {
+    return markers->size();
+  }
+  void _track_markers_clear()
+  {
+    track_markers->clear();
+  }
+  void _track_markers_push_back(Marker* mn)
+  {
+    track_markers->push_back(*((M*)mn));
+  }
+  size_t _track_markers_size()
+  {
+    return track_markers->size();
+  }
+  Marker* _track_markers_at(size_t i)
+  {
+    return &track_markers->at(i);
+  }
+
+  void _swap_marker_tables()
+  {
+    std::vector<M, Eigen::aligned_allocator<M> >* tmp_markers = markers;
+    markers = track_markers;
+    track_markers = tmp_markers;
   }

 public:
+  std::vector<M, Eigen::aligned_allocator<M> >* markers;
+  std::vector<M, Eigen::aligned_allocator<M> >* track_markers;

-	std::vector<M, Eigen::aligned_allocator<M> > *markers;
-	std::vector<M, Eigen::aligned_allocator<M> > *track_markers;
-
-	/** Constructor */
-	MarkerDetector() : MarkerDetectorImpl() {
+  /** Constructor */
+  MarkerDetector() : MarkerDetectorImpl()
+  {
     markers = new std::vector<M, Eigen::aligned_allocator<M> >;
     track_markers = new std::vector<M, Eigen::aligned_allocator<M> >;
-	}
+  }

-	/** Destructor */
-	~MarkerDetector() {
+  /** Destructor */
+  ~MarkerDetector()
+  {
     delete markers;
     delete track_markers;
-	}
+  }
 };

-} // namespace alvar
+}  // namespace alvar

 #endif
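
Putting the detector API together, a typical per-frame loop looks roughly like the sketch below. The frame, the calibrated \e Camera and the 5.0 cm edge length are assumptions for illustration, not part of this diff:

```
#include <iostream>
#include "ar_track_alvar/MarkerDetector.h"

// Sketch: detect MarkerData markers in one BGR frame.
void detect_once(cv::Mat& frame, alvar::Camera& cam)
{
  alvar::MarkerDetector<alvar::MarkerData> detector;
  detector.SetMarkerSize(5.0);  // edge length in your chosen units, e.g. cm

  int n = detector.Detect(frame, &cam, true /*track*/, false /*visualize*/);
  std::cout << n << " markers detected" << std::endl;
  for (size_t i = 0; i < detector.markers->size(); ++i)
    std::cout << "id " << (*detector.markers)[i].GetId()
              << " error " << (*detector.markers)[i].GetError() << std::endl;
}
```
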
diff --git a/ar_track_alvar/include/ar_track_alvar/MultiMarker.h b/ar_track_alvar/include/ar_track_alvar/MultiMarker.h
index ade1d2d..cda20ad 100644
--- a/ar_track_alvar/include/ar_track_alvar/MultiMarker.h
+++ b/ar_track_alvar/include/ar_track_alvar/MultiMarker.h
@@ -36,163 +36,212 @@
 #include "Camera.h"
 #include "Filter.h"
 #include "FileFormat.h"
-#include
+#include
 #include

-namespace alvar {
-
+namespace alvar
+{
 /**
  * \brief Base class for using MultiMarker.
 */
-class ALVAR_EXPORT MultiMarker {
-
-
+class ALVAR_EXPORT MultiMarker
+{
 private:
-
-	bool SaveXML(const char* fname);
-	bool SaveText(const char* fname);
-	bool LoadText(const char* fname);
-	bool LoadXML(const char* fname);
+  bool SaveXML(const char* fname);
+  bool SaveText(const char* fname);
+  bool LoadText(const char* fname);
+  bool LoadXML(const char* fname);

 public:
-	// The marker information is stored in all three tables using
-	// the indices-order given in constructor.
-	// One idea is that the same 'pointcloud' could contain feature
-	// points after marker-corner-points. This way they would be
-	// optimized simultaneously with marker corners...
-	std::map<int, CvPoint3D64f> pointcloud;
-	std::vector<int> marker_indices; // The marker id's to be used in marker field (first being the base)
-	std::vector<int> marker_status;  // 0: not in point cloud, 1: in point cloud, 2: used in GetPose()
-	std::vector< std::vector<PointDouble> > rel_corners; //The coords of the master marker relative to each child marker in marker_indices
-
-	int pointcloud_index(int marker_id, int marker_corner, bool add_if_missing=false);
-	int get_id_index(int id, bool add_if_missing=false);
-
-	double _GetPose(MarkerIterator &begin, MarkerIterator &end, Camera* cam, Pose& pose, IplImage* image);
-
-	int _SetTrackMarkers(MarkerDetectorImpl &marker_detector, Camera* cam, Pose& pose, IplImage *image);
-	int master_id;  //The id of the first marker specified in the XML file
-
-
-	/** \brief Resets the multi marker. */
-	virtual void Reset();
-
-	/** \brief Saves multi marker configuration to a text file.
-	 * \param fname file name
-	 * \param format FILE_FORMAT_TEXT (default) or FILE_FORMAT_XML (see doc/MultiMarker.xsd).
-	 */
-	bool Save(const char* fname, FILE_FORMAT format = FILE_FORMAT_DEFAULT);
-
-	/** \brief Loads multi marker configuration from a text file.
-	 * \param fname file name
-	 * \param format FILE_FORMAT_TEXT (default) or FILE_FORMAT_XML (see doc/MultiMarker.xsd).
-	 */
-	bool Load(const char* fname, FILE_FORMAT format = FILE_FORMAT_DEFAULT);
-
-	/** \brief Constructor.
-		\param indices Vector of marker codes that are included into multi marker. The first element defines origin.
-	*/
-	MultiMarker(std::vector<int>& indices);
-
-	/** \brief Default constructor */
-	MultiMarker() {}
-
-	/** \brief Calculates the pose of the camera from multi marker. Method uses the true 3D coordinates of
-		markers to get the initial pose and then optimizes it by minimizing the reprojection error.
-
-		\param markers Vector of markers seen by camera.
-		\param cam Camera object containing internal calibration etc.
-		\param pose The resulting pose is stored here.
-		\param image If != 0 some visualizations are drawn.
-	*/
-	template <class M>
-	double GetPose(const std::vector<M, Eigen::aligned_allocator<M> >* markers, Camera* cam, Pose& pose, IplImage* image = 0)
-	{
-		MarkerIteratorImpl<M> begin(markers->begin());
-		MarkerIteratorImpl<M> end(markers->end());
-		return _GetPose(begin, end,
-		                cam, pose, image);
-	}
-
-	/** \brief Calls GetPose to obtain camera pose.
-	*/
-	template <class M>
-	double Update(const std::vector<M, Eigen::aligned_allocator<M> >* markers, Camera* cam, Pose& pose, IplImage* image = 0)
-	{
-		if(markers->size() < 1) return -1.0;
-
-		return GetPose(markers, cam, pose, image);
-	}
-
-	/** \brief Resets the 3D point cloud of the markers.
-	*/
-	void PointCloudReset();
-
-	/** \brief Calculates 3D coordinates of marker corners relative to given pose (camera).
-
-		\param edge_length The edge length of the marker.
-		\param pose Current pose of the camera.
-		\param corners Resulted 3D corner points are stored here.
-	*/
-	void PointCloudCorners3d(double edge_length, Pose &pose, CvPoint3D64f corners[4]);
-
-	/** \brief Adds marker corners to 3D point cloud of multi marker.
-		\param marker_id Id of the marker to be added.
-		\param edge_length Edge length of the marker.
-		\param pose Current camera pose.
-	*/
-	void PointCloudAdd(int marker_id, double edge_length, Pose &pose);
-
-	/** \brief Copies the 3D point cloud from other multi marker object.
-	*/
-	void PointCloudCopy(const MultiMarker *m);
-
-	/** \brief Returns true if the are not points in the 3D opint cloud.
-	*/
-	bool PointCloudIsEmpty() {
-		return pointcloud.empty();
-	}
-
-	/** \brief Return the number of markers added using PointCloudAdd */
-	size_t Size() {
-		return marker_indices.size();
-	}
-
-	/** \brief Returns 3D co-ordinates of one corner point of a marker.
-	 * \param marker_id ID of the marker which corner point is returned.
-	 * \param point Number of the corner point to return, from 0 to 3.
-	 * \param x x co-ordinate is returned.
-	 * \param y y co-ordinate is returned.
-	 * \param z z co-ordinate is returned.
-	 */
-	void PointCloudGet(int marker_id, int point,
-	                   double &x, double &y, double &z);
-
-	/** \brief Returns true if the marker is in the point cloud.
-	 * \param marker_id ID of the marker.
-	 */
-	bool IsValidMarker(int marker_id);
-
-	std::vector<int> getIndices(){
-		return marker_indices;
-	}
-
-	int getMasterId(){
-		return master_id;
-	}
-
-	/** \brief Set new markers to be tracked for \e MarkerDetector
-	 * Sometimes the \e MultiMarker implementation knows more about marker
-	 * locations compared to \e MarkerDetector . Then this method can be
-	 * used to reset the internal state of \e MarkerDetector for tracking
-	 * also these markers.
-	 */
-	template <class M>
-	int SetTrackMarkers(MarkerDetector<M> &marker_detector, Camera* cam, Pose& pose, IplImage *image=0) {
+  // The marker information is stored in all three tables using
+  // the indices-order given in constructor.
+  // One idea is that the same 'pointcloud' could contain feature
+  // points after marker-corner-points. This way they would be
+  // optimized simultaneously with marker corners...
+  std::map<int, cv::Point3d> pointcloud;
+  std::vector<int> marker_indices;  // The marker id's to be used in marker
+                                    // field (first being the base)
+  std::vector<int> marker_status;   // 0: not in point cloud, 1: in point cloud,
+                                    // 2: used in GetPose()
+  std::vector<std::vector<PointDouble> > rel_corners;  // The coords of the
+                                                       // master marker relative
+                                                       // to each child marker
+                                                       // in marker_indices
+
+  int pointcloud_index(int marker_id, int marker_corner,
+                       bool add_if_missing = false);
+  int get_id_index(int id, bool add_if_missing = false);
+
+  double _GetPose(MarkerIterator& begin, MarkerIterator& end, Camera* cam,
+                  Pose& pose, cv::Mat& image);
+  double _GetPose(MarkerIterator& begin, MarkerIterator& end, Camera* cam,
+                  Pose& pose)
+  {
+    cv::Mat empty_img;
+    return _GetPose(begin, end, cam, pose, empty_img);
+  }
+
+  int _SetTrackMarkers(MarkerDetectorImpl& marker_detector, Camera* cam,
+                       Pose& pose, cv::Mat& image);
+  int master_id;  // The id of the first marker specified in the XML file
+
+  /** \brief Resets the multi marker. */
+  virtual void Reset();
+
+  /** \brief Saves multi marker configuration to a text file.
+   * \param fname file name
+   * \param format FILE_FORMAT_TEXT (default) or FILE_FORMAT_XML (see
+   * doc/MultiMarker.xsd).
+   */
+  bool Save(const char* fname, FILE_FORMAT format = FILE_FORMAT_DEFAULT);
+
+  /** \brief Loads multi marker configuration from a text file.
+   * \param fname file name
+   * \param format FILE_FORMAT_TEXT (default) or FILE_FORMAT_XML (see
+   * doc/MultiMarker.xsd).
+   */
+  bool Load(const char* fname, FILE_FORMAT format = FILE_FORMAT_DEFAULT);
+
+  /** \brief Constructor.
+     \param indices Vector of marker codes that are included into multi marker.
+     The first element defines origin.
+   */
+  MultiMarker(std::vector<int>& indices);
+
+  /** \brief Default constructor */
+  MultiMarker()
+  {
+  }
+
+  /** \brief Calculates the pose of the camera from multi marker. Method uses
+     the true 3D coordinates of markers to get the initial pose and then
+     optimizes it by minimizing the reprojection error.

+     \param markers Vector of markers seen by camera.
+     \param cam Camera object containing internal calibration etc.
+     \param pose The resulting pose is stored here.
+     \param image If != 0 some visualizations are drawn.
+   */
+  template <class M>
+  double GetPose(const std::vector<M, Eigen::aligned_allocator<M> >* markers,
+                 Camera* cam, Pose& pose, cv::Mat& image)
+  {
+    MarkerIteratorImpl<M> begin(markers->begin());
+    MarkerIteratorImpl<M> end(markers->end());
+    return _GetPose(begin, end, cam, pose, image);
+  }
+  template <class M>
+  double GetPose(const std::vector<M, Eigen::aligned_allocator<M> >* markers,
+                 Camera* cam, Pose& pose)
+  {
+    MarkerIteratorImpl<M> begin(markers->begin());
+    MarkerIteratorImpl<M> end(markers->end());
+    return _GetPose(begin, end, cam, pose);
+  }
+
+  /** \brief Calls GetPose to obtain camera pose.
+   */
+  template <class M>
+  double Update(const std::vector<M, Eigen::aligned_allocator<M> >* markers,
+                Camera* cam, Pose& pose, cv::Mat& image)
+  {
+    if (markers->size() < 1)
+      return -1.0;
+
+    return GetPose(markers, cam, pose, image);
+  }
+  template <class M>
+  double Update(const std::vector<M, Eigen::aligned_allocator<M> >* markers,
+                Camera* cam, Pose& pose)
+  {
+    if (markers->size() < 1)
+      return -1.0;
+
+    return GetPose(markers, cam, pose);
+  }
+
+  /** \brief Resets the 3D point cloud of the markers.
+   */
+  void PointCloudReset();
+
+  /** \brief Calculates 3D coordinates of marker corners relative to given pose
+     (camera).

+     \param edge_length The edge length of the marker.
+     \param pose Current pose of the camera.
+     \param corners Resulted 3D corner points are stored here.
+   */
+  void PointCloudCorners3d(double edge_length, Pose& pose,
+                           cv::Point3d corners[4]);
+
+  /** \brief Adds marker corners to 3D point cloud of multi marker.
+     \param marker_id Id of the marker to be added.
+     \param edge_length Edge length of the marker.
+     \param pose Current camera pose.
+   */
+  void PointCloudAdd(int marker_id, double edge_length, Pose& pose);
+
+  /** \brief Copies the 3D point cloud from other multi marker object.
+   */
+  void PointCloudCopy(const MultiMarker* m);
+
+  /** \brief Returns true if there are no points in the 3D point cloud.
+   */
+  bool PointCloudIsEmpty()
+  {
+    return pointcloud.empty();
+  }
+
+  /** \brief Return the number of markers added using PointCloudAdd */
+  size_t Size()
+  {
+    return marker_indices.size();
+  }
+
+  /** \brief Returns 3D co-ordinates of one corner point of a marker.
+   * \param marker_id ID of the marker which corner point is returned.
+   * \param point Number of the corner point to return, from 0 to 3.
+   * \param x x co-ordinate is returned.
+   * \param y y co-ordinate is returned.
+   * \param z z co-ordinate is returned.
+   */
+  void PointCloudGet(int marker_id, int point, double& x, double& y, double& z);
+
+  /** \brief Returns true if the marker is in the point cloud.
+   * \param marker_id ID of the marker.
+   */
+  bool IsValidMarker(int marker_id);
+
+  std::vector<int> getIndices()
+  {
+    return marker_indices;
+  }
+
+  int getMasterId()
+  {
+    return master_id;
+  }
+
+  /** \brief Set new markers to be tracked for \e MarkerDetector
+   * Sometimes the \e MultiMarker implementation knows more about marker
+   * locations compared to \e MarkerDetector . Then this method can be
+   * used to reset the internal state of \e MarkerDetector for tracking
+   * also these markers.
+   */
+  template <class M>
+  int SetTrackMarkers(MarkerDetector<M>& marker_detector, Camera* cam,
+                      Pose& pose, cv::Mat& image)
+  {
     return _SetTrackMarkers(marker_detector, cam, pose, image);
-	}
+  }
 };

-} // namespace alvar
+}  // namespace alvar

 #endif
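
A typical use of the \e MultiMarker API above, assuming a bundle layout was saved to a file earlier. The helper, the ids and the file name are illustrative; \e Update returns a negative value when no markers are visible:

```
#include "ar_track_alvar/MultiMarker.h"

// Sketch: camera pose from a marker bundle, reusing `detector` and `cam`
// from the detection sketch above.
double pose_from_bundle(alvar::MarkerDetector<alvar::MarkerData>& detector,
                        alvar::Camera& cam, alvar::Pose& pose)
{
  std::vector<int> ids;
  ids.push_back(0);  // the first id becomes the bundle origin
  ids.push_back(1);
  ids.push_back(2);

  alvar::MultiMarker multi(ids);
  multi.Load("bundle.xml", alvar::FILE_FORMAT_XML);

  return multi.Update(detector.markers, &cam, pose);  // image-less overload
}
```
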
diff --git a/ar_track_alvar/include/ar_track_alvar/MultiMarkerBundle.h b/ar_track_alvar/include/ar_track_alvar/MultiMarkerBundle.h
index cf9d126..e6ca73b 100644
--- a/ar_track_alvar/include/ar_track_alvar/MultiMarkerBundle.h
+++ b/ar_track_alvar/include/ar_track_alvar/MultiMarkerBundle.h
@@ -35,69 +35,93 @@
 #include "Optimization.h"
 #include

-namespace alvar {
-
+namespace alvar
+{
 /**
- * \brief Multi marker that uses bundle adjustment to refine the 3D positions of the markers (point cloud).
+ * \brief Multi marker that uses bundle adjustment to refine the 3D positions of
+ * the markers (point cloud).
  *
  * This can be initialized by using e.g. MultiMarkerAverage class.
  */
 class ALVAR_EXPORT MultiMarkerBundle : public MultiMarker
 {
 protected:
-	int optimization_keyframes;
-	int optimization_markers;
-	double optimization_error;
-	bool optimizing;
-	std::vector<Pose> camera_poses; // Estimated camera pose for every frame
-	std::map<int, PointDouble> measurements; //
-	int measurements_index(int frame, int marker_id, int marker_corner) {
-		// hop return (int) (frame*marker_indices.size()*4)+(marker_id*4)+marker_corner;
-		return (int) (frame*marker_indices.size()*4)+(get_id_index(marker_id)*4)+marker_corner;
-	}
+  int optimization_keyframes;
+  int optimization_markers;
+  double optimization_error;
+  bool optimizing;
+  std::vector<Pose> camera_poses;  // Estimated camera pose for every frame
+  std::map<int, PointDouble> measurements;  //
+  int measurements_index(int frame, int marker_id, int marker_corner)
+  {
+    // hop return (int)
+    // (frame*marker_indices.size()*4)+(marker_id*4)+marker_corner;
+    return (int)(frame * marker_indices.size() * 4) +
+           (get_id_index(marker_id) * 4) + marker_corner;
+  }

-	void _MeasurementsAdd(MarkerIterator &begin, MarkerIterator &end, const Pose& camera_pose);
+  void _MeasurementsAdd(MarkerIterator& begin, MarkerIterator& end,
+                        const Pose& camera_pose);

 public:
+  /** \brief Constructor.
+     \param indices Vector of marker codes that are included into multi marker.
+     The first element defines origin.
+   */
+  MultiMarkerBundle(std::vector<int>& indices);

-	/** \brief Constructor.
-		\param indices Vector of marker codes that are included into multi marker. The first element defines origin.
-	*/
-	MultiMarkerBundle(std::vector<int>& indices);
-
-	~MultiMarkerBundle();
+  ~MultiMarkerBundle();

-	/** \brief Resets the measurements and camera poses that are stored for bundle adjustment. If something goes from in
-		optimization one will call this and acquire new measurements.
-	*/
-	void MeasurementsReset();
+  /** \brief Resets the measurements and camera poses that are stored for bundle
+     adjustment. If something goes wrong in optimization one will call this and
+     acquire new measurements.
+   */
+  void MeasurementsReset();

-	double GetOptimizationError() { return optimization_error; }
-	int GetOptimizationKeyframes() { return optimization_keyframes; }
-	int GetOptimizationMarkers() { return optimization_markers; }
-	bool GetOptimizing() { return optimizing; }
+  double GetOptimizationError()
+  {
+    return optimization_error;
+  }
+  int GetOptimizationKeyframes()
+  {
+    return optimization_keyframes;
+  }
+  int GetOptimizationMarkers()
+  {
+    return optimization_markers;
+  }
+  bool GetOptimizing()
+  {
+    return optimizing;
+  }

-	/** \brief Adds new measurements that are used in bundle adjustment.
-		\param markers Vector of markers detected by MarkerDetector.
-		\param camera_pose Current camera pose.
-	*/
-	template <class M>
-	void MeasurementsAdd(const std::vector<M, Eigen::aligned_allocator<M> > *markers, const Pose& camera_pose) {
-		MarkerIteratorImpl<M> begin(markers->begin());
-		MarkerIteratorImpl<M> end(markers->end());
-		_MeasurementsAdd(begin, end,
-		                 camera_pose);
-	}
+  /** \brief Adds new measurements that are used in bundle adjustment.
+     \param markers Vector of markers detected by MarkerDetector.
+     \param camera_pose Current camera pose.
+   */
+  template <class M>
+  void
+  MeasurementsAdd(const std::vector<M, Eigen::aligned_allocator<M> >* markers,
+                  const Pose& camera_pose)
+  {
+    MarkerIteratorImpl<M> begin(markers->begin());
+    MarkerIteratorImpl<M> end(markers->end());
+    _MeasurementsAdd(begin, end, camera_pose);
+  }

-	/** \brief Runs the bundle adjustment optimization.
-		\param markers Vector of markers detected by MarkerDetector.
-		\param camera_pose Current camera pose.
-		\param max_iter Maximum number of iteration loops.
-		\param method The method that is applied inside optimization. Try Optimization::LEVENBERGMARQUARDT or Optimization::GAUSSNEWTON or Optmization::TUKEY_LM
-	*/ //LEVENBERGMARQUARDT
-	bool Optimize(Camera *_cam, double stop, int max_iter, Optimization::OptimizeMethod method = Optimization::TUKEY_LM); //TUKEY_LM
+  /** \brief Runs the bundle adjustment optimization.
+     \param markers Vector of markers detected by MarkerDetector.
+     \param camera_pose Current camera pose.
+     \param max_iter Maximum number of iteration loops.
+     \param method The method that is applied inside optimization. Try
+     Optimization::LEVENBERGMARQUARDT or Optimization::GAUSSNEWTON or
+     Optimization::TUKEY_LM
+   */  // LEVENBERGMARQUARDT
+  bool Optimize(Camera* _cam, double stop, int max_iter,
+                Optimization::OptimizeMethod method =
+                    Optimization::TUKEY_LM);  // TUKEY_LM
 };

-} // namespace alvar
+}  // namespace alvar

 #endif
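
For completeness, a sketch of feeding keyframes to \e MultiMarkerBundle and running \e Optimize. The helper, its thresholds, and the assumption that a pose estimate exists per keyframe are all illustrative:

```
#include "ar_track_alvar/MultiMarkerBundle.h"

// Sketch: record one keyframe's measurements, then refine the point cloud.
// `detector` and `cam` are assumed from the detection sketch above.
void refine_bundle(alvar::MultiMarkerBundle& bundle,
                   alvar::MarkerDetector<alvar::MarkerData>& detector,
                   alvar::Camera& cam, const alvar::Pose& keyframe_pose)
{
  // Store the markers seen in this keyframe together with the camera pose.
  bundle.MeasurementsAdd(detector.markers, keyframe_pose);

  // Run bundle adjustment; stop criterion and iteration count are examples.
  bundle.Optimize(&cam, 0.001 /*stop*/, 20 /*max_iter*/);
}
```
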
diff --git a/ar_track_alvar/include/ar_track_alvar/MultiMarkerFiltered.h b/ar_track_alvar/include/ar_track_alvar/MultiMarkerFiltered.h
index c6b4eac..91e23ed 100644
--- a/ar_track_alvar/include/ar_track_alvar/MultiMarkerFiltered.h
+++ b/ar_track_alvar/include/ar_track_alvar/MultiMarkerFiltered.h
@@ -33,8 +33,8 @@

 #include "MultiMarker.h"

-namespace alvar {
-
+namespace alvar
+{
 /**
  * \brief Multi marker that is constructed by first calculating the marker
  * poses directly relative to base marker and then filtering the results
@@ -45,52 +45,56 @@ namespace alvar {
 class ALVAR_EXPORT MultiMarkerFiltered : public MultiMarker
 {
 protected:
-	static const int filter_buffer_max=15;
-	FilterMedian *pointcloud_filtered;
+  static const int filter_buffer_max = 15;
+  FilterMedian* pointcloud_filtered;

-	double _Update(MarkerIterator &begin, MarkerIterator &end,
-	               Camera* cam, Pose& pose, IplImage* image);
+  double _Update(MarkerIterator& begin, MarkerIterator& end, Camera* cam,
+                 Pose& pose, IplImage* image);

 public:
-	/** \brief Constructor.
-		\param indices Vector of marker codes that are included into multi marker. The first element defines origin.
-	*/
-	MultiMarkerFiltered(std::vector<int>& indices);
+  /** \brief Constructor.
+     \param indices Vector of marker codes that are included into multi marker.
+     The first element defines origin.
+   */
+  MultiMarkerFiltered(std::vector<int>& indices);

-	/** \brief Destructor. */
-	~MultiMarkerFiltered();
+  /** \brief Destructor. */
+  ~MultiMarkerFiltered();

-	/** \brief Updates the 3D point cloud by averaging the calculated results.
-		\param marker_id The id of the marker whose corner positions are updated.
-		\param edge_length The edge length of the marker.
-		\param pose Current camera pose that is used to estimate the marker position.
-	*/
-	void PointCloudAverage(int marker_id, double edge_length, Pose &pose);
+  /** \brief Updates the 3D point cloud by averaging the calculated results.
+     \param marker_id The id of the marker whose corner positions are updated.
+     \param edge_length The edge length of the marker.
+     \param pose Current camera pose that is used to estimate the marker
+     position.
+   */
+  void PointCloudAverage(int marker_id, double edge_length, Pose& pose);

-	/** \brief Updates the marker field and camera pose.
-		\param markers Markers seen by the camera.
-		\param camera Camera object used to detect markers.
-		\param pose Current camera pose. This is also updated.
-		\param image If != 0 some visualization will be drawn.
-	*/
-	template <class M>
-	double Update(const std::vector<M>* markers, Camera* cam, Pose& pose, IplImage* image = 0)
-	{
-		if(markers->size() < 1) return false;
-		MarkerIteratorImpl<M> begin(markers->begin());
-		MarkerIteratorImpl<M> end(markers->end());
-		return _Update(begin, end,
-		               cam, pose, image);
-	}
+  /** \brief Updates the marker field and camera pose.
+     \param markers Markers seen by the camera.
+     \param camera Camera object used to detect markers.
+     \param pose Current camera pose. This is also updated.
+     \param image If != 0 some visualization will be drawn.
+   */
+  template <class M>
+  double Update(const std::vector<M>* markers, Camera* cam, Pose& pose,
+                IplImage* image = 0)
+  {
+    if (markers->size() < 1)
+      return false;
+    MarkerIteratorImpl<M> begin(markers->begin());
+    MarkerIteratorImpl<M> end(markers->end());
+    return _Update(begin, end, cam, pose, image);
+  }

-	/**
-	 * \brief Reset the measurements
-	 */
-	void MeasurementsReset() {
-		pointcloud_filtered->reset();
-	}
+  /**
+   * \brief Reset the measurements
+   */
+  void MeasurementsReset()
+  {
+    pointcloud_filtered->reset();
+  }
 };

-} // namespace alvar
+}  // namespace alvar

 #endif
diff --git a/ar_track_alvar/include/ar_track_alvar/MultiMarkerInitializer.h b/ar_track_alvar/include/ar_track_alvar/MultiMarkerInitializer.h
index 4ad4190..cd3645e 100644
--- a/ar_track_alvar/include/ar_track_alvar/MultiMarkerInitializer.h
+++ b/ar_track_alvar/include/ar_track_alvar/MultiMarkerInitializer.h
@@ -34,8 +34,8 @@
 #include "MultiMarker.h"
 #include

-namespace alvar {
-
+namespace alvar
+{
 /**
  * \brief Initializes multi marker by estimating their relative positions
  * from one or more images.
@@ -50,79 +50,110 @@ namespace alvar {
 class ALVAR_EXPORT MultiMarkerInitializer : public MultiMarker
 {
 public:
-	/**
-	 * \brief MarkerMeasurement that holds the maker id.
-	 */
-	class MarkerMeasurement : public Marker {
-		long _id;
-	public:
-		MarkerMeasurement() : globalPose(false) {}
-		bool globalPose;
-		unsigned long GetId() const { return _id; }
-		void SetId(unsigned long _id) { this->_id = _id; }
-	};
+  /**
+   * \brief MarkerMeasurement that holds the marker id.
+   */
+  class MarkerMeasurement : public Marker
+  {
+    long _id;

+  public:
+    MarkerMeasurement() : globalPose(false)
+    {
+    }
+    bool globalPose;
+    unsigned long GetId() const
+    {
+      return _id;
+    }
+    void SetId(unsigned long _id)
+    {
+      this->_id = _id;
+    }
+  };

 protected:
-	std::vector<bool> marker_detected;
-	std::vector<std::vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> > > measurements;
-	typedef std::vector<std::vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> > >::iterator MeasurementIterator;
-	FilterMedian *pointcloud_filtered;
-	int filter_buffer_min;
-
-	bool updateMarkerPoses(std::vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> > &markers, const Pose &pose);
-	void MeasurementsAdd(MarkerIterator &begin, MarkerIterator &end);
+  std::vector<bool> marker_detected;
+  std::vector<std::vector<MarkerMeasurement,
+                          Eigen::aligned_allocator<MarkerMeasurement> > >
+      measurements;
+  typedef std::vector<
+      std::vector<MarkerMeasurement,
+                  Eigen::aligned_allocator<MarkerMeasurement> > >::iterator
+      MeasurementIterator;
+  FilterMedian* pointcloud_filtered;
+  int filter_buffer_min;
+
+  bool updateMarkerPoses(
+      std::vector<MarkerMeasurement,
+                  Eigen::aligned_allocator<MarkerMeasurement> >& markers,
+      const Pose& pose);
+  void MeasurementsAdd(MarkerIterator& begin, MarkerIterator& end);

 public:
-	MultiMarkerInitializer(std::vector<int>& indices, int filter_buffer_min = 4, int filter_buffer_max = 15);
-	~MultiMarkerInitializer();
-
-	/**
-	 * Adds a new measurement for marker field initialization.
-	 * Each measurement should contain at least two markers.
-	 * It does not matter which markers are visible, especially
-	 * the zero marker does not have to be visible in every measurement.
-	 * It suffices that there exists a 'path' from the zero marker
-	 * to every other marker in the marker field.
-	 *
-	 * For example:
-	 * - first measurement contains marker A and B.
-	 * - second measurement containt markers ZERO and A.
-	 *
-	 * When Initialize is called, the system can first deduce the pose of A
-	 * and then the pose of B.
-	 */
-	template <class M>
-	void MeasurementsAdd(const std::vector<M, Eigen::aligned_allocator<M> > *markers) {
-		MarkerIteratorImpl<M> begin(markers->begin());
-		MarkerIteratorImpl<M> end(markers->end());
-		MeasurementsAdd(begin, end);
-	}
-
-	/**
-	 *
-	 */
-	void MeasurementsReset();
-
-	/**
-	 * Tries to deduce marker poses from measurements.
-	 *
-	 * Returns the number of initialized markers.
-	 */
-	int Initialize(Camera* cam);
-
-	int getMeasurementCount() { return measurements.size(); }
-
-	const std::vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> >& getMeasurementMarkers(int measurement) {
-		return measurements[measurement];
-	}
-
-	double getMeasurementPose(int measurement, Camera *cam, Pose &pose) {
-		MarkerIteratorImpl<MarkerMeasurement> m_begin(measurements[measurement].begin());
-		MarkerIteratorImpl<MarkerMeasurement> m_end(measurements[measurement].end());
-		return _GetPose(m_begin, m_end, cam, pose, NULL);
-	}
+  MultiMarkerInitializer(std::vector<int>& indices, int filter_buffer_min = 4,
+                         int filter_buffer_max = 15);
+  ~MultiMarkerInitializer();
+
+  /**
+   * Adds a new measurement for marker field initialization.
+   * Each measurement should contain at least two markers.
+   * It does not matter which markers are visible, especially
+   * the zero marker does not have to be visible in every measurement.
+   * It suffices that there exists a 'path' from the zero marker
+   * to every other marker in the marker field.
+   *
+   * For example:
+   * - first measurement contains marker A and B.
+   * - second measurement contains markers ZERO and A.
+   *
+   * When Initialize is called, the system can first deduce the pose of A
+   * and then the pose of B.
+ */ + template + void + MeasurementsAdd(const std::vector >* markers) + { + MarkerIteratorImpl begin(markers->begin()); + MarkerIteratorImpl end(markers->end()); + MeasurementsAdd(begin, end); + } + + /** + * + */ + void MeasurementsReset(); + + /** + * Tries to deduce marker poses from measurements. + * + * Returns the number of initialized markers. + */ + int Initialize(Camera* cam); + + int getMeasurementCount() + { + return measurements.size(); + } + + const std::vector >& + getMeasurementMarkers(int measurement) + { + return measurements[measurement]; + } + + double getMeasurementPose(int measurement, Camera* cam, Pose& pose) + { + MarkerIteratorImpl m_begin( + measurements[measurement].begin()); + MarkerIteratorImpl m_end( + measurements[measurement].end()); + cv::Mat empty_img; + return _GetPose(m_begin, m_end, cam, pose); + } }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Mutex.h b/ar_track_alvar/include/ar_track_alvar/Mutex.h index ef2437d..483d519 100644 --- a/ar_track_alvar/include/ar_track_alvar/Mutex.h +++ b/ar_track_alvar/include/ar_track_alvar/Mutex.h @@ -32,8 +32,8 @@ #include "Alvar.h" -namespace alvar { - +namespace alvar +{ class MutexPrivate; /** @@ -44,33 +44,33 @@ class MutexPrivate; class ALVAR_EXPORT Mutex { public: - /** - * \brief Constructor. - */ - Mutex(); + /** + * \brief Constructor. + */ + Mutex(); - /** - * \brief Destructor. - */ - ~Mutex(); + /** + * \brief Destructor. + */ + ~Mutex(); - /** - * \brief Locks the mutex. - * - * If the mutex is already locked by another thread, this method will - * block until the other thread unlocks the mutex. - */ - void lock(); + /** + * \brief Locks the mutex. + * + * If the mutex is already locked by another thread, this method will + * block until the other thread unlocks the mutex. + */ + void lock(); - /** - * \brief Unlocks the mutex. - */ - void unlock(); + /** + * \brief Unlocks the mutex. + */ + void unlock(); private: - MutexPrivate *d; + MutexPrivate* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Mutex_private.h b/ar_track_alvar/include/ar_track_alvar/Mutex_private.h index 99bd9bd..a4d76ca 100644 --- a/ar_track_alvar/include/ar_track_alvar/Mutex_private.h +++ b/ar_track_alvar/include/ar_track_alvar/Mutex_private.h @@ -24,21 +24,21 @@ #ifndef MUTEX_PRIVATE_H #define MUTEX_PRIVATE_H -namespace alvar { - +namespace alvar +{ class MutexPrivateData; class MutexPrivate { public: - MutexPrivate(); - ~MutexPrivate(); - void lock(); - void unlock(); + MutexPrivate(); + ~MutexPrivate(); + void lock(); + void unlock(); - MutexPrivateData *d; + MutexPrivateData* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Optimization.h b/ar_track_alvar/include/ar_track_alvar/Optimization.h index 4699329..bd9d0fa 100644 --- a/ar_track_alvar/include/ar_track_alvar/Optimization.h +++ b/ar_track_alvar/include/ar_track_alvar/Optimization.h @@ -25,114 +25,117 @@ #define OPTIMIZATION_H #include "Alvar.h" -#include +#include //#include - /** * \file Optimization.h * * \brief This file implements several optimization algorithms. */ -namespace alvar { - -/** - * \brief Non-linear optimization routines. There are three methods implemented that include Gauss-Newton, Levenberg-Marquardt and Tukey m-estimator. - * - */ -class ALVAR_EXPORT Optimization +namespace alvar +{ +/** + * \brief Non-linear optimization routines. 
There are three methods implemented + * that include Gauss-Newton, Levenberg-Marquardt and Tukey m-estimator. + * + */ +class ALVAR_EXPORT Optimization { - private: - - void *estimate_param; - CvMat *J; - CvMat *JtJ; - CvMat *W; - CvMat *diag; - CvMat *tmp; - CvMat *err; - CvMat *delta; - CvMat *x_plus; - CvMat *x_minus; - CvMat *x_tmp1; - CvMat *x_tmp2; - CvMat *tmp_par; - - double CalcTukeyWeight(double residual, double c); - double CalcTukeyWeightSimple(double residual, double c); - - double lambda; + void* estimate_param; + cv::Mat J; + cv::Mat JtJ; + cv::Mat W; + cv::Mat diag; + cv::Mat tmp; + cv::Mat err; + cv::Mat delta; + cv::Mat x_plus; + cv::Mat x_minus; + cv::Mat x_tmp1; + cv::Mat x_tmp2; + cv::Mat tmp_par; + + double CalcTukeyWeight(double residual, double c); + double CalcTukeyWeightSimple(double residual, double c); + + double lambda; public: - - /** - * \brief Selection between the algorithm used in optimization. Following should be noticed: - * \li GAUSSNEWTON - */ - enum OptimizeMethod - { - GAUSSNEWTON, - LEVENBERGMARQUARDT, - TUKEY_LM - }; - - /** - * \brief Constructor. - * \param n_params Number of parameters to be optimized. - * \param n_meas Number of measurements that are observed. - */ - Optimization(int n_params, int n_meas); - ~Optimization(); - - /** - * \brief Returns the current residual vector. - * \return Pointer to the residual vector. - */ - CvMat *GetErr() { return err; } - - /** - * \brief Pointer to the function that projects the state of the system to the measurements. - * \param state System parameters, e.g. camera parameterization in optical tracking. - * \param projection The system state projection is stored here. E.g image measurements in optical tracking. - * \param param Additional parameters to the function. E.g. some constant parameters that are not optimized. - */ - typedef void (*EstimateCallback)(CvMat* state, CvMat *projection, void *param); - - /** - * \brief Numerically differentiates and calculates the Jacobian around x. - * \param x The set of parameters around which the Jacobian is evaluated. - * \param J Resulting Jacobian matrix is stored here. - * \param Estimate The function to be differentiated. - */ - void CalcJacobian(CvMat* x, CvMat* J, EstimateCallback Estimate); - - /** - * \brief Runs the optimization loop with selected parameters. - * \param parameters Vector of parameters to be optimized. Initial values should be set. - * \param measurements Vector of measurements that are observed. - * \param stop Optimization loop ends as the \e stop limit is reached. Criteria is calculated as - * \param max_iter Maximum number of iteration loops that are evaluated if \e stop is not reached. - * \param Estimate Pointer to the function that maps the state to the measurements. See \e EstimateCallback. - * \param method One of the three possible optimization methods. - * \param parameters_mask Vector that defines the parameters that are optimized. If vector element is 0, corresponding parameter is not altered. - * \param J_mat Jacobian matrix. If not given, numerical differentation is used. - * \param weights Weight vector that can be submitted to give different weights to different measurements. Currently works only with OptimizeMethod::TUKEY_LM. 
-	 */
-	double Optimize(CvMat* parameters,
-	                CvMat* measurements,
-	                double stop,
-	                int max_iter,
-	                EstimateCallback Estimate,
-	                void *param = 0,
-	                OptimizeMethod method = LEVENBERGMARQUARDT,
-	                CvMat* parameters_mask = 0,
-	                CvMat* J_mat = 0,
-	                CvMat* weights = 0);
-
+  /**
+   * \brief Selection between the algorithm used in optimization. Following
+   * should be noticed: \li GAUSSNEWTON
+   */
+  enum OptimizeMethod
+  {
+    GAUSSNEWTON,
+    LEVENBERGMARQUARDT,
+    TUKEY_LM
+  };
+
+  /**
+   * \brief Constructor.
+   * \param n_params Number of parameters to be optimized.
+   * \param n_meas Number of measurements that are observed.
+   */
+  Optimization(int n_params, int n_meas);
+  ~Optimization();
+
+  /**
+   * \brief Returns the current residual vector.
+   * \return Pointer to the residual vector.
+   */
+  cv::Mat& GetErr()
+  {
+    return err;
+  }
+
+  /**
+   * \brief Pointer to the function that projects the state of the system to the
+   * measurements.
+   * \param state System parameters, e.g. camera parameterization in optical
+   * tracking.
+   * \param projection The system state projection is stored here. E.g image
+   * measurements in optical tracking. \param param Additional parameters to
+   * the function. E.g. some constant parameters that are not optimized.
+   */
+  typedef void (*EstimateCallback)(cv::Mat& state, cv::Mat& projection,
+                                   void* param);
+
+  /**
+   * \brief Numerically differentiates and calculates the Jacobian around x.
+   * \param x The set of parameters around which the Jacobian is evaluated.
+   * \param J Resulting Jacobian matrix is stored here.
+   * \param Estimate The function to be differentiated.
+   */
+  void CalcJacobian(cv::Mat& x, cv::Mat& J, EstimateCallback Estimate);
+
+  /**
+   * \brief Runs the optimization loop with selected parameters.
+   * \param parameters Vector of parameters to be optimized. Initial values
+   * should be set. \param measurements Vector of measurements that are
+   * observed. \param stop Optimization loop ends as the \e stop limit is
+   * reached. Criteria is calculated as \param max_iter Maximum number of
+   * iteration loops that are evaluated if \e stop is not reached. \param
+   * Estimate Pointer to the function that maps the state to the
+   * measurements. See \e EstimateCallback. \param method One of the three
+   * possible optimization methods. \param parameters_mask Vector that defines
+   * the parameters that are optimized. If vector element is 0, corresponding
+   * parameter is not altered. \param J_mat Jacobian matrix. If not given,
+   * numerical differentiation is used. \param weights Weight vector that can
+   * be submitted to give different weights to different measurements. Currently
+   * works only with OptimizeMethod::TUKEY_LM.
+   */
+  double Optimize(cv::Mat& parameters, cv::Mat& measurements, double stop,
+                  int max_iter, EstimateCallback Estimate, void* param = 0,
+                  OptimizeMethod method = LEVENBERGMARQUARDT,
+                  const cv::Mat& parameters_mask = cv::Mat(),
+                  const cv::Mat& J_mat = cv::Mat(),
+                  const cv::Mat& weights = cv::Mat());
 };

-} // namespace alvar
+}  // namespace alvar

 #endif
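
A sketch of driving \e Optimize with a user-supplied \e EstimateCallback. The linear model, the dimensions, and the measurement values below are invented purely for illustration:

```
#include <opencv2/core.hpp>
#include "ar_track_alvar/Optimization.h"

// Hypothetical model: the i-th measurement is a*i + b.
static void project(cv::Mat& state, cv::Mat& projection, void* /*param*/)
{
  for (int i = 0; i < projection.rows; ++i)
    projection.at<double>(i, 0) =
        state.at<double>(0, 0) * i + state.at<double>(1, 0);
}

int main()
{
  alvar::Optimization opt(2, 4);  // 2 parameters, 4 measurements
  cv::Mat params = cv::Mat::zeros(2, 1, CV_64F);  // initial guess for (a, b)
  cv::Mat meas = (cv::Mat_<double>(4, 1) << 1.0, 3.1, 4.9, 7.2);
  opt.Optimize(params, meas, 0.0001 /*stop*/, 50 /*max_iter*/, project);
  // params now holds the fitted slope and intercept.
  return 0;
}
```
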
diff --git a/ar_track_alvar/include/ar_track_alvar/Platform.h b/ar_track_alvar/include/ar_track_alvar/Platform.h
index a28701c..063af8e 100644
--- a/ar_track_alvar/include/ar_track_alvar/Platform.h
+++ b/ar_track_alvar/include/ar_track_alvar/Platform.h
@@ -37,8 +37,8 @@
 #include "Timer.h"
 #include "Uncopyable.h"

-namespace alvar {
-
+namespace alvar
+{
 /**
  * \brief Error reporting function inspired by error_at_line() on Linux.
 *
@@ -47,22 +47,22 @@ namespace alvar {
  * the corresponding error message from strerror(). If status is not zero, it
  * exits the process.
  */
-void ALVAR_EXPORT errorAtLine(int status, int error, const char *filename,
-                              unsigned int line, const char *format, ...);
+void ALVAR_EXPORT errorAtLine(int status, int error, const char* filename,
+                              unsigned int line, const char* format, ...);

 /**
  * \brief Sleep for a specified amount of milliseconds.
  */
 void ALVAR_EXPORT sleep(unsigned long milliseconds);

-} // namespace alvar
+}  // namespace alvar

 #ifdef min
-  #undef min
+#undef min
 #endif

 #ifdef max
-  #undef max
+#undef max
 #endif

 #endif
diff --git a/ar_track_alvar/include/ar_track_alvar/Plugin.h b/ar_track_alvar/include/ar_track_alvar/Plugin.h
index 9d033bb..de625ce 100644
--- a/ar_track_alvar/include/ar_track_alvar/Plugin.h
+++ b/ar_track_alvar/include/ar_track_alvar/Plugin.h
@@ -34,64 +34,67 @@

 #include

-namespace alvar {
-
+namespace alvar
+{
 class PluginPrivate;

 /**
  * \brief Plugin for loading dynamic libraries.
  *
- * Plugin class for loading dynamic libraires. The library is loaded during construction and
- * unloaded during destruction.
+ * Plugin class for loading dynamic libraries. The library is loaded during
+ * construction and unloaded during destruction.
  */
 class Plugin
 {
 public:
-	/**
-	 * \brief Constructor.
-	 *
-	 * Constructing a Plugin object will attempt to load the plugin dynamic library.
-	 *
-	 * \param filename The filename of the dynamic library to load.
-	 * \exception AlvarException An exeption is thrown if the library can't be loaded.
-	 */
-	Plugin(const std::string filename);
+  /**
+   * \brief Constructor.
+   *
+   * Constructing a Plugin object will attempt to load the plugin dynamic
+   * library.
+   *
+   * \param filename The filename of the dynamic library to load.
+   * \exception AlvarException An exception is thrown if the library can't be
+   * loaded.
+   */
+  Plugin(const std::string filename);

-	/**
-	 * \brief Copy constructor.
-	 *
-	 * \param plugin The Plugin to copy.
-	 */
-	Plugin(const Plugin &plugin);
+  /**
+   * \brief Copy constructor.
+   *
+   * \param plugin The Plugin to copy.
+   */
+  Plugin(const Plugin& plugin);

-	/**
-	 * \brief Assignment operator.
-	 *
-	 * \param plugin The Plugin to copy.
-	 */
-	Plugin &operator=(const Plugin &plugin);
+  /**
+   * \brief Assignment operator.
+   *
+   * \param plugin The Plugin to copy.
+   */
+  Plugin& operator=(const Plugin& plugin);

-	/**
-	 * \brief Destructor.
-	 */
-	~Plugin();
+  /**
+   * \brief Destructor.
+   */
+  ~Plugin();

-	/**
-	 * \brief Resolves the address of a symbol.
-	 *
-	 * The symbol must be exported from the library as a C function.
-	 *
-	 * \param symbol The signature of the symbol.
-	 * \return The address of the symbol.
-	 * \exception AlvarException An exception is thrown if the symbol is not found.
-	 */
-	void *resolve(const char *symbol);
+  /**
+   * \brief Resolves the address of a symbol.
+   *
+   * The symbol must be exported from the library as a C function.
+   *
+   * \param symbol The signature of the symbol.
+   * \return The address of the symbol.
+   * \exception AlvarException An exception is thrown if the symbol is not
+   * found.
+ */ + void* resolve(const char* symbol); private: - PluginPrivate *d; - int *mReferenceCount; + PluginPrivate* d; + int* mReferenceCount; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Plugin_private.h b/ar_track_alvar/include/ar_track_alvar/Plugin_private.h index 6f4f592..d81eb9b 100644 --- a/ar_track_alvar/include/ar_track_alvar/Plugin_private.h +++ b/ar_track_alvar/include/ar_track_alvar/Plugin_private.h @@ -26,22 +26,22 @@ #include -namespace alvar { - +namespace alvar +{ class PluginPrivateData; class PluginPrivate { public: - PluginPrivate(); - ~PluginPrivate(); - void load(const std::string filename); - void unload(); - void *resolve(const char *symbol); + PluginPrivate(); + ~PluginPrivate(); + void load(const std::string filename); + void unload(); + void* resolve(const char* symbol); - PluginPrivateData *d; + PluginPrivateData* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Pose.h b/ar_track_alvar/include/ar_track_alvar/Pose.h index f92f5c0..51d69de 100644 --- a/ar_track_alvar/include/ar_track_alvar/Pose.h +++ b/ar_track_alvar/include/ar_track_alvar/Pose.h @@ -33,91 +33,96 @@ * \brief This file implements a pose. */ -namespace alvar { - +namespace alvar +{ /** * \brief \e Pose representation derived from the \e Rotation class * - * The rotation part of the transformation is handled by \e Rotation . + * The rotation part of the transformation is handled by \e Rotation . * The translation part is stored internally using homogeneous 4-vector. * * Internally in ALVAR we assume coordinate system where - * 'x' is right, 'y' is down, and 'z' is forward. However - * the \e SetMatrixGL and \e GetMatrixGL change the pose + * 'x' is right, 'y' is down, and 'z' is forward. However + * the \e SetMatrixGL and \e GetMatrixGL change the pose * to support coordinates commonly used in OpenGL: * 'x' is right, 'y' is up, and 'z' is backward. */ -class ALVAR_EXPORT Pose : public Rotation { +class ALVAR_EXPORT Pose : public Rotation +{ protected: - // Note, although we are using homogeneous coordinates x, y, z, w -- w is now mostly ignored + // Note, although we are using homogeneous coordinates x, y, z, w -- w is now + // mostly ignored public: - double translation[4]; - CvMat translation_mat; + double translation[4]; + cv::Mat translation_mat; - /** \e Output for debugging purposes */ - void Output() const; - /** \e Constructor */ - Pose(); - /** \e Constructor using the given translation and rotation elements - * \param tra Column vector containing three translation elements - * \param rot Handled using the \e Rotation class - * \param t Handled using the \e Rotation class - */ - Pose(CvMat *tra, CvMat *rot, RotationType t); - /** \e Constructor with 3x3, 3x4 or 4x4 matrix representation - * \param mat A 3x3 rotation matrix or 3x4 / 4x4 transformation matrix - */ - Pose(CvMat *mat); - /** \e Copy constructor */ - Pose(const Pose& p); - /** \e Reset the pose */ - void Reset(); - /** Set the transformation from the given matrix \e mat - * \param mat A 3x3 rotation matrix or 3x4 / 4x4 transformation matrix - */ - void SetMatrix(const CvMat *mat); - /** \brief Set the \e Pose using OpenGL's transposed format. 
- * Note, that by default this also mirrors both the y- and z-axis (see \e Camera and \e Pose for more information) - * \param gl OpenGL 4x4 transformation matrix elements in column-order - */ - void SetMatrixGL(double gl[16], bool mirror=true); - /** Get the transformation into the given matrix \e mat - * \param mat A 3x3 rotation matrix or 3x4 / 4x4 transformation matrix - */ - void GetMatrix(CvMat *mat) const; - /** \brief Get the transformation matrix representation of the \e Pose using OpenGL's transposed format. - * Note, that by default this also mirrors both the y- and z-axis (see \e Camera and \e Pose for more information) - * \param gl OpenGL 4x4 transformation matrix elements in column-order - */ - void GetMatrixGL(double gl[16], bool mirror=true); - /** \e Transpose the transformation */ - void Transpose(); - /** Invert the pose */ - void Invert(); - /** \e Mirror the \e Pose - * \param x If true mirror the x-coordinates - * \param y If true mirror the y-coordinates - * \param z If true mirror the z-coordinates - */ - void Mirror(bool x, bool y, bool z); - /** Set the translation part for the \e Pose - * \param tra Column vector containing three translation elements - */ - void SetTranslation(const CvMat *tra); - /** Set the translation part for the \e Pose - * \param tra Array containing three translation elements - */ - void SetTranslation(const double *tra); - /** Set the translation part for the \e Pose */ - void SetTranslation(const double x, const double y, const double z); - /** Get the translation part from the \e Pose - * \param tra Column vector where the three translation elements are filled in - */ - void GetTranslation(CvMat *tra) const; - /** Assignment operator for copying \e Pose class */ - Pose& operator = (const Pose& p); + /** \e Output for debugging purposes */ + void Output() const; + /** \e Constructor */ + Pose(); + /** \e Constructor using the given translation and rotation elements + * \param tra Column vector containing three translation elements + * \param rot Handled using the \e Rotation class + * \param t Handled using the \e Rotation class + */ + Pose(const cv::Mat& tra, const cv::Mat& rot, RotationType t); + /** \e Constructor with 3x3, 3x4 or 4x4 matrix representation + * \param mat A 3x3 rotation matrix or 3x4 / 4x4 transformation matrix + */ + Pose(const cv::Mat& mat); + /** \e Copy constructor */ + Pose(const Pose& p); + /** \e Reset the pose */ + void Reset(); + /** Set the transformation from the given matrix \e mat + * \param mat A 3x3 rotation matrix or 3x4 / 4x4 transformation matrix + */ + void SetMatrix(const cv::Mat& mat); + /** \brief Set the \e Pose using OpenGL's transposed format. + * Note, that by default this also mirrors both the y- and z-axis (see \e + * Camera and \e Pose for more information) \param gl OpenGL 4x4 + * transformation matrix elements in column-order + */ + void SetMatrixGL(double gl[16], bool mirror = true); + /** Get the transformation into the given matrix \e mat + * \param mat A 3x3 rotation matrix or 3x4 / 4x4 transformation matrix + */ + void GetMatrix(cv::Mat& mat) const; + /** \brief Get the transformation matrix representation of the \e Pose using + * OpenGL's transposed format. 
Note, that by default this also mirrors both + * the y- and z-axis (see \e Camera and \e Pose for more information) \param + * gl OpenGL 4x4 transformation matrix elements in column-order + */ + void GetMatrixGL(double gl[16], bool mirror = true); + /** \e Transpose the transformation */ + void Transpose(); + /** Invert the pose */ + void Invert(); + /** \e Mirror the \e Pose + * \param x If true mirror the x-coordinates + * \param y If true mirror the y-coordinates + * \param z If true mirror the z-coordinates + */ + void Mirror(bool x, bool y, bool z); + /** Set the translation part for the \e Pose + * \param tra Column vector containing three translation elements + */ + void SetTranslation(const cv::Mat& tra); + /** Set the translation part for the \e Pose + * \param tra Array containing three translation elements + */ + void SetTranslation(const double* tra); + /** Set the translation part for the \e Pose */ + void SetTranslation(const double x, const double y, const double z); + /** Get the translation part from the \e Pose + * \param tra Column vector where the three translation elements are filled + * in + */ + void GetTranslation(cv::Mat& tra) const; + /** Assignment operator for copying \e Pose class */ + Pose& operator=(const Pose& p); }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Ransac.h b/ar_track_alvar/include/ar_track_alvar/Ransac.h index f43ae3d..c6f1d6b 100644 --- a/ar_track_alvar/include/ar_track_alvar/Ransac.h +++ b/ar_track_alvar/include/ar_track_alvar/Ransac.h @@ -33,14 +33,14 @@ * \brief This file implements a generic RANSAC algorithm. */ -namespace alvar { - +namespace alvar +{ /** * \brief Internal implementation of RANSAC. Please use Ransac or IndexRansac. */ -class ALVAR_EXPORT RansacImpl { - - protected: +class ALVAR_EXPORT RansacImpl +{ +protected: void** samples; void* hypothesis; int min_params; @@ -48,40 +48,39 @@ class ALVAR_EXPORT RansacImpl { int sizeof_param; int sizeof_model; - RansacImpl(int min_params, int max_params, - int sizeof_param, int sizeof_model); + RansacImpl(int min_params, int max_params, int sizeof_param, + int sizeof_model); virtual ~RansacImpl(); - int _estimate(void* params, int param_c, - int support_limit, int max_rounds, - void* model); + int _estimate(void* params, int param_c, int support_limit, int max_rounds, + void* model); - int _refine(void* params, int param_c, - int support_limit, int max_rounds, - void* model, char *inlier_mask = NULL); + int _refine(void* params, int param_c, int support_limit, int max_rounds, + void* model, char* inlier_mask = NULL); - virtual void _doEstimate(void** params, int param_c, void* model) {}; - virtual bool _doSupports(void* param, void* model) { return false; }; + virtual void _doEstimate(void** params, int param_c, void* model){}; + virtual bool _doSupports(void* param, void* model) + { + return false; + }; /** IndexRansac version */ - int *indices; - - RansacImpl(int min_params, int max_params, - int sizeof_model); + int* indices; - int _estimate(int param_c, - int support_limit, int max_rounds, - void* model); + RansacImpl(int min_params, int max_params, int sizeof_model); - int _refine(int param_c, - int support_limit, int max_rounds, - void* model, char *inlier_mask = NULL); + int _estimate(int param_c, int support_limit, int max_rounds, void* model); - virtual void _doEstimate(int* params, int param_c, void* model) {}; - virtual bool _doSupports(int param, void* model) { return false; }; + int _refine(int param_c, int 
support_limit, int max_rounds, void* model,
+              char* inlier_mask = NULL);
 
-  virtual void _doEstimate(int* params, int param_c, void* model) {};
-  virtual bool _doSupports(int param, void* model) { return false; };
+  virtual void _doEstimate(int* params, int param_c, void* model){};
+  virtual bool _doSupports(int param, void* model)
+  {
+    return false;
+  };
 
-  public:
+public:
   /** \brief How many rounds are needed for the Ransac to work.
    *
    * Computes the required amount of rounds from the estimated
@@ -96,360 +95,371 @@ class ALVAR_EXPORT RansacImpl {
    * max_rounds when calling the estimate method.
    */
   int estimateRequiredRounds(float success_propability,
-                             float inlier_percentage);
-
+                             float inlier_percentage);
 };
 
- /**
-  * \brief Implementation of a general RANdom SAmple Consensus algorithm. 
+/**
+ * \brief Implementation of a general RANdom SAmple Consensus algorithm.
+ *
+ * This implementation can be used to estimate a model from a set of input
+ * data. The user needs to provide support methods to compute the best
+ * model given a set of input data and to classify input data into inliers
+ * and outliers.
+ *
+ * For more information see "Martin A. Fischler and Robert C. Bolles:
+ * Random Sample Consensus: a paradigm for model fitting with applications
+ * to image analysis and automated cartography. Comm of the ACM 24: 381-395"
+ * (http://portal.acm.org/citation.cfm?doid=358669.358692).
+ *
+ * MODEL is the estimated model, for example endpoints of a line for
+ * line fitting.
+ *
+ * PARAMETER is the input for model estimation, for example 2D point
+ * for line fitting.
+ *
+ * MODEL must support an assignment operator.
+ *
+ * The user needs to extend this class and provide two methods:
+ * \code
+ * void doEstimate(PARAMETER** params, int param_c, MODEL* model);
+ * bool doSupports(PARAMETER* param, MODEL* model);
+ * \endcode
+ *
+ * Example: Fitting points to a line:
+ *
+ * \code
+ * typedef struct Point { double x; double y; };
+ * typedef struct Line { Point p1, p2; };
+ *
+ * class MyLineFittingRansac : public Ransac<Line, Point> {
+ *   // Line fitting needs at least 2 parameters and here we want
+ *   // to support at most 16 parameters.
+ *   MyLineFittingRansac() : Ransac(2, 16) {}
+ *
+ *   void doEstimate(Point **points, int points_c, Line *line) {
+ *     if (points_c == 2) { *line = Line(*points[0], *points[1]); }
+ *     else { // compute best line fitting up to 16 points. }
+ *   }
+ *
+ *   bool doSupports(Point *point, Line *line) {
+ *     double distance = // compute point distance to line.
+ *     return distance < POINT_DISTANCE_LIMIT;
+ *   }
+ * };
+ *
+ * Point input[N_POINTS] = { .. };
+ * Line line;
+ * MyLineFittingRansac ransac;
+ *
+ * // assuming 60% of inliers, run RANSAC until the best model is found
+ * // with 99% probability.
+ * int max_rounds = ransac.estimateRequiredRounds(0.99, 0.6);
+ * int number_of_inliers =
+ *     ransac.estimate(input, N_POINTS, N_POINTS, max_rounds, &line);
+ *
+ * // let's make the estimate even better.
+ * if (number_of_inliers > 0 && number_of_inliers < N_POINTS)
+ *   number_of_inliers = ransac.refine(input, N_POINTS, N_POINTS, 10, &line);
+ *
+ * // you should keep track of the real percentage of inliers to determine
+ * // the required number of RANSAC rounds.
+ * double inlier_percentage = (double)number_of_inliers / (double)N_POINTS;
+ *
+ * \endcode
+ */
+template <typename MODEL, typename PARAMETER>
+class Ransac : public RansacImpl
+{
+protected:
+  /** \brief Creates a model estimate from a set of parameters.
    *
-  * This implementation can be used to estimate model from a set of input
-  * data. 
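For intuition, `estimateRequiredRounds` above answers: how many random samples are needed so that, with the requested success probability, at least one sample is all inliers? The standard relation is rounds = log(1 - p) / log(1 - w^n), with p the success probability, w the inlier ratio, and n the sample size; whether ALVAR rounds or clamps the result internally is not shown in this header. A small sanity check of that arithmetic:

```cpp
#include <cmath>
#include <cstdio>

// Textbook RANSAC round count: smallest k with 1 - (1 - w^n)^k >= p.
static int requiredRounds(double p, double w, int n)
{
  return (int)std::ceil(std::log(1.0 - p) / std::log(1.0 - std::pow(w, n)));
}

int main()
{
  // 99% success probability, 60% inliers, 2-point samples (line fitting):
  // roughly 11 rounds, matching the 0.99/0.6 example in the comments above.
  std::printf("%d rounds\n", requiredRounds(0.99, 0.6, 2));
  return 0;
}
```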
The user needs to provide support methods to compute the best
-  * model given a set of input data and to classify input data into inliers
-  * and outliers.
+   * The user must implement this method to compute model parameters
+   * from the input data.
    *
-  * For more information see "Martin A Fischler and Robrt C. Bolles:
-  * Random Sample Consensus: a paradigm for model fitting with applications
-  * to image analysis and automated cartography. Comm of the ACM 24: 381-395"
-  * (http://portal.acm.org/citation.cfm?doid=358669.358692).
-  *
-  * MODEL is the estimated model, for example endpoints of a line for
-  * line fitting.
+   * \param params An array of pointers to sampled parameters (input data).
+   * \param param_c The number of parameter pointers in the params array.
+   * \param model Pointer to the model where to store the estimate.
+   */
+  virtual void doEstimate(PARAMETER** params, int param_c, MODEL* model) = 0;
+
+  /** \brief Computes how well a parameter supports a model.
    *
-  * PARAMETER is the input for model estimation, for example 2D point
-  * for line fitting.
+   * This method is used by the RANSAC algorithm to count how many
+   * parameters support the estimated model (inliers). Although this
+   * is case specific, usually a parameter supports the model when the distance
+   * from model prediction is not too far away from the parameter.
    *
-  * MODEL must support an assigment operator.
+   * \param param Pointer to the parameter to check.
+   * \param model Pointer to the model to check the parameter against.
+   * \return True when the parameter supports the model.
+   */
+  virtual bool doSupports(PARAMETER* param, MODEL* model) = 0;
+
+  /**
+   * Wrapper for templated parameters.
+   */
+  void _doEstimate(void** params, int param_c, void* model)
+  {
+    doEstimate((PARAMETER**)params, param_c, (MODEL*)model);
+  }
+
+  /**
+   * Wrapper for templated parameters.
+   */
+  bool _doSupports(void* param, void* model)
+  {
+    return doSupports((PARAMETER*)param, (MODEL*)model);
+  }
+
+public:
+  /** \brief Initialize the algorithm.
    *
-  * The user needs to extend this class and provide two methods:
-  * \code
-  * void doEstimate(PARAMETER** params, int param_c, MODEL* model);
-  * bool doSupports(PARAMETER* param, MODEL* model);
-  * \endcode
+   * Uses at least min_params and at most max_params number of input data
+   * elements for model estimation.
    *
-  * Example: Fitting points to a line:
+   * Must be: max_params >= min_params
    *
-  * \code
-  * typedef struct Point { double x, double y; };
-  * typedef struct Line { Point p1, p2; };
+   * \param min_params is the minimum number of parameters needed to create
+   * a model.
+   * \param max_params is the maximum number of parameters to use in refining
+   * the model.
+   */
+  Ransac(int min_params, int max_params)
+    : RansacImpl(min_params, max_params, sizeof(PARAMETER), sizeof(MODEL))
+  {
+  }
+
+  virtual ~Ransac()
+  {
+  }
+
+  /** \brief Estimates a model from input data parameters.
    *
-  * class MyLineFittingRansac : public Ransac<Line, Point> {
-  *   // Line fitting needs at least 2 parameters and here we want
-  *   // to support at most 16 parameters.
-  *   MyLineFittingRansac() : Ransac(2, 16) {}
+   * Randomly samples min_params number of input data elements from params array
+   * and chooses the model that has the largest set of supporting parameters
+   * (inliers) in the params array.
    *
-  *   void doEstimate(Point **points, int points_c, Line *line) {
-  *     if (points_c == 2) { return Line(*points[0], *points[1]); }
-  *     else { // compute best line fitting up to 16 points. 
}
-  *   }
+   * Note that this method always uses min_params number of parameters, that is,
+   * \e doEstimate method can be implemented to support only the minimum number
+   * of parameters unless \e refine method is used.
    *
-  *   bool doSupports(Point *point, Line *line) {
-  *     double distance = // compute point distance to line.
-  *     return distance < POINT_DISTANCE_LIMIT;
-  *   }
-  * };
+   * \param params Parameters that the model is estimated from (input data).
+   * \param param_c Number of elements in the params array.
+   * \param support_limit The search is stopped if a model receives
+   * more support than this limit.
+   * \param max_rounds How many different samples are tried before
+   * stopping the search.
+   * \param model The estimated model is stored here.
+   * \return the number of parameters supporting the model, or 0
+   * if a suitable model could not be built at all.
+   */
+  int estimate(PARAMETER* params, int param_c, int support_limit,
+               int max_rounds, MODEL* model)
+  {
+    return _estimate(params, param_c, support_limit, max_rounds, model);
+  }
+
+  /** \brief Iteratively makes the estimated model better.
    *
-  * Point input[N_POINTS] = { .. };
-  * Line line;
-  * MyLineFittingRansac ransac;
+   * Starting with the estimated model, computes the model from all
+   * inlier parameters and iterates until no new parameters support
+   * the model.
    *
-  * // assuming 60% of inliers, run RANSAC until the best model is found with 99% propability.
-  * int max_rounds = ransac.estimateRequiredRounds(0.99, 0.6);
-  * int number_of_inliers = ransac.estimate(input, N_POINTS, N_POINTS, max_rounds, &line);
+   * Note that this method uses up to max_params number of parameters,
+   * that is, \e doEstimate method must be implemented in such a way
+   * that it can estimate a model from a variable number of parameters.
    *
-  * // lets make the estimate even better.
-  * if (number_of_inliers > 0 && number_of_inliers < N_POINTS)
-  *   number_of_inliers = ransac.refine(input, N_POINTS, N_POINTS, 10, &line);
+   * \param params Parameters that the model is estimated from.
+   * \param param_c Number of parameters.
+   * \param support_limit The search is stopped if a model receives
+   * more support than this limit.
+   * \param max_rounds How many iterations of the refinement are run.
+   * \param model The estimated model that is refined.
+   * \param inlier_mask Byte array where 1 is stored for inliers and 0 for
+   * outliers.
+   * \return the number of parameters supporting the model.
+   */
+  int refine(PARAMETER* params, int param_c, int support_limit, int max_rounds,
+             MODEL* model, char* inlier_mask = NULL)
+  {
+    return _refine(params, param_c, support_limit, max_rounds, model,
+                   inlier_mask);
+  }
+};
+
+/**
+ * \brief Implementation of a general RANdom SAmple Consensus algorithm
+ * with implicit parameters.
+ *
+ * These parameters are accessed by indices. The benefit of this is that
+ * we avoid copying input data from an array into another. See \e Ransac
+ * class for more details. 
+ *
+ * Extending class must provide two methods:
+ * \code
+ * void doEstimate(int* params, int param_c, MODEL* model);
+ * bool doSupports(int param, MODEL* model);
+ * \endcode
+ *
+ * Example fitting points to a line (compare this with the example
+ * in the \e Ransac class):
+ *
+ * \code
+ * typedef struct Point { double x; double y; };
+ * typedef struct Line { Point p1, p2; };
+ *
+ * class MyLineFittingRansac : public IndexRansac<Line> {
+ *   Point *points;
+ *
+ *   // Line fitting needs at least 2 parameters and here we want
+ *   // to support at most 16 parameters.
+ *   MyLineFittingRansac(Point *input) : IndexRansac(2, 16), points(input) {}
+ *
+ *   void doEstimate(int *indices, int points_c, Line *line) {
+ *     if (points_c == 2) { *line = Line(points[indices[0]], points[indices[1]]); }
+ *     else { // compute best line fitting up to 16 points. }
+ *   }
+ *
+ *   bool doSupports(int index, Line *line) {
+ *     Point *point = &points[index];
+ *     double distance = // compute point distance to line.
+ *     return distance < POINT_DISTANCE_LIMIT;
+ *   }
+ * };
+ *
+ * Point input[N_POINTS] = { .. };
+ * Line line;
+ * MyLineFittingRansac ransac(input);
+ *
+ * // assuming 60% of inliers, run RANSAC until the best model is found
+ * // with 99% probability.
+ * int max_rounds = ransac.estimateRequiredRounds(0.99, 0.6);
+ * int number_of_inliers = ransac.estimate(N_POINTS, N_POINTS, max_rounds,
+ *     &line);
+ *
+ * // let's make the estimate even better.
+ * if (number_of_inliers > 0 && number_of_inliers < N_POINTS)
+ *   number_of_inliers = ransac.refine(N_POINTS, N_POINTS, 10, &line);
+ *
+ * // you should keep track of the real percentage of inliers to determine
+ * // the required number of RANSAC rounds.
+ * double inlier_percentage = (double)number_of_inliers / (double)N_POINTS;
+ *
+ * \endcode
+ */
+template <typename MODEL>
+class IndexRansac : public RansacImpl
+{
+protected:
+  /** \brief Creates a model estimate from a set of parameters.
    *
-  * // you should keep track of the real percentage of inliers to determine
-  * // the required number of RANSAC rounds.
-  * double inlier_percentage = (double)number_of_inliers / (double)N_POINTS;
+   * The user must implement this method to compute model parameters
+   * from the input data.
    *
-  * \endcode
+   * \param params An array of indices of sampled parameters.
+   * \param param_c The number of parameter indices in the params array.
+   * \param model Pointer to the model where to store the estimate.
    */
- template <typename MODEL, typename PARAMETER>
- class Ransac : public RansacImpl {
-
-  protected:
-   /** \brief Creates a model estimate from a set of parameters.
-    *
-    * The user must implement this method to compute model parameters
-    * from the input data.
-    *
-    * \param params An array of pointers to sampled parameters (input data).
-    * \param param_c The number of parameter pointers in the params array.
-    * \param model Pointer to the model where to store the estimate.
-    */
-   virtual void doEstimate(PARAMETER** params, int param_c, MODEL* model) = 0;
-
-   /** \brief Computes how well a parameters supports a model.
-    *
-    * This method is used by the RANSAC algorithm to count how many
-    * parameters support the estimated model (inliers). Althought this
-    * is case specific, usually parameter supports the model when the distance
-    * from model prediction is not too far away from the parameter.
-    *
-    * \param param Pointer to the parameter to check.
-    * \param model Pointer to the model to check the parameter against.
-    * \return True when the parameter supports the model. 
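The line-fitting examples in the comments above are pseudocode. A variant of the pointer-based subclass that actually compiles could look like this; the template argument order (model first, then parameter) and the 1.0-unit inlier threshold are assumptions for the sketch:

```cpp
#include <cmath>

#include "ar_track_alvar/Ransac.h"

struct Point2 { double x; double y; };
struct Line2 { Point2 p1; Point2 p2; };

class LineRansac : public alvar::Ransac<Line2, Point2>
{
public:
  // A line needs at least 2 points; refine with up to 16.
  LineRansac() : Ransac(2, 16) {}

protected:
  void doEstimate(Point2** points, int count, Line2* line)
  {
    // Minimal-sample case only; a full version would least-squares
    // fit when count > 2.
    if (count >= 2)
    {
      line->p1 = *points[0];
      line->p2 = *points[1];
    }
  }

  bool doSupports(Point2* p, Line2* line)
  {
    // Perpendicular distance from p to the line through p1 and p2.
    double dx = line->p2.x - line->p1.x;
    double dy = line->p2.y - line->p1.y;
    double len = std::sqrt(dx * dx + dy * dy);
    if (len == 0.0)
      return false;
    double dist =
        std::fabs(dy * (p->x - line->p1.x) - dx * (p->y - line->p1.y)) / len;
    return dist < 1.0;  // assumed inlier threshold
  }
};
```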
- */ - virtual bool doSupports(PARAMETER* param, MODEL* model) = 0; - - /** - * Wrapper for templated parameters. - */ - void _doEstimate(void** params, int param_c, void* model) { - doEstimate((PARAMETER**)params, param_c, (MODEL*)model); - } - - /** - * Wrapper for templated parameters. - */ - bool _doSupports(void* param, void* model) { - return doSupports((PARAMETER*)param, (MODEL*)model); - } - - public: - /** \brief Initialize the algorithm. - * - * Uses at least min_params and at most max_params number of input data - * elements for model estimation. - * - * Must be: max_params >= min_params - * - * \param min_params is the minimum number of parameters needed to create - * a model. - * \param max_params is the maximum number of parameters to using in refining - * the model. - */ - Ransac(int min_params, int max_params) - : RansacImpl(min_params, max_params, sizeof(PARAMETER), sizeof(MODEL)) {} - - virtual ~Ransac() {} - - /** \brief Estimates a model from input data parameters. - * - * Randomly samples min_params number of input data elements from params array - * and chooses the model that has the largest set of supporting parameters - * (inliers) in the params array. - * - * Note that this method always uses min_params number of parameters, that is, - * \e doEstimate method can be implemented to support only the minimum number - * of parameters unless \e refine method is used. - * - * \param params Parameters that the model is estimated from (input data). - * \param param_c Number of elements in the params array. - * \param support_limit The search is stopped if a model receives - * more support that this limit. - * \param max_rounds How many different samples are tried before - * stopping the search. - * \param model The estimated model is stored here. - * \return the number of parameters supporting the model, or 0 - * if a suitable model could not be build at all. - */ - int estimate(PARAMETER* params, int param_c, - int support_limit, int max_rounds, - MODEL* model) { - return _estimate(params, param_c, support_limit, max_rounds, model); - } - - /** \brief Iteratively makes the estimated model better. - * - * Starting with the estimated model, computes the model from all - * inlier parameters and interates until no new parameters support - * the model. - * - * Note that this method uses up to max_params number of parameters, - * that is, \e doEstimate method must be implemented in such a way - * that it can estimate a model from a variable number of parameters. - * - * \param params Parameters that the model is estimated from. - * \param param_c Number of parameters. - * \param support_limit The search is stopped is a model receives - * more support that this limit. - * \param max_rounds How many iterations of the refinement are run. - * \param model The estimated model that is refined. - * \param inlier_mask Byte array where 1 is stored for inliers and 0 for outliers. - * \return the number of parameters supporting the model. - */ - int refine(PARAMETER* params, int param_c, - int support_limit, int max_rounds, - MODEL* model, char *inlier_mask = NULL) { - return _refine(params, param_c, support_limit, max_rounds, model, inlier_mask); - } + virtual void doEstimate(int* params, int param_c, MODEL* model) = 0; - }; + /** \brief Computes how well a parameters supports a model. + * + * This method is used by the RANSAC algorithm to count how many + * parameters support the estimated model (inliers). 
Although this
+   * is case specific, usually a parameter supports the model when the distance
+   * from model prediction is not too far away from the parameter.
+   *
+   * \param param Index of the parameter to check.
+   * \param model Pointer to the model to check the parameter against.
+   * \return True when the parameter supports the model.
+   */
+  virtual bool doSupports(int param, MODEL* model) = 0;
 
+  /**
+   * Wrapper for templated parameters.
+   */
+  void _doEstimate(int* params, int param_c, void* model)
+  {
+    doEstimate(params, param_c, (MODEL*)model);
+  }
   /**
-  * \brief Implementation of a general RANdom SAmple Consensus algorithm
-  * with implicit parameters.
-  *
-  * These parameters are accessed by indises. The benefit of this is that
-  * we avoid copying input data from an array into another. See \e Ransac
-  * class for more details.
-  *
-  * Extending class must provide two methods:
-  * \code
-  * void doEstimate(int* params, int param_c, MODEL* model);
-  * bool doSupports(int param, MODEL* model);
-  * \endcode
-  *
-  * Example fitting points to a line (compare this with the example
-  * in the \e Ransac class):
-  *
-  * \code
-  * typedef struct Point { double x, double y; };
-  * typedef struct Line { Point p1, p2; };
+   * Wrapper for templated parameters.
+   */
+  bool _doSupports(int param, void* model)
+  {
+    return doSupports(param, (MODEL*)model);
+  }
+
+public:
+  /** \brief Initialize the algorithm.
    *
-  * class MyLineFittingRansac : public IndexRansac<Line> {
-  *   Point *points;
+   * Uses at least min_params and at most max_params number of input data
+   * elements for model estimation.
    *
-  *   // Line fitting needs at least 2 parameters and here we want
-  *   // to support at most 16 parameters.
-  *   MyLineFittingRansac(Points *input) : Ransac(2, 16), points(input) {}
+   * Must be: max_params >= min_params
    *
-  *   void doEstimate(int *indises, int points_c, Line *line) {
-  *     if (points_c == 2) { return Line(points[indises[0]], *points[indises[1]]); }
-  *     else { // compute best line fitting up to 16 points. }
-  *   }
+   * \param min_params is the minimum number of parameters needed to create
+   * a model.
+   * \param max_params is the maximum number of parameters to use in refining
+   * the model.
+   */
+  IndexRansac(int min_params, int max_params)
+    : RansacImpl(min_params, max_params, sizeof(MODEL))
+  {
+  }
+
+  virtual ~IndexRansac()
+  {
+  }
+
+  /** \brief Estimates a model from input data parameters.
    *
-  *   bool doSupports(int index, Line *line) {
-  *     Point *point = &points[index];
-  *     double distance = // compute point distance to line.
-  *     return distance < POINT_DISTANCE_LIMIT;
-  *   }
-  * };
+   * Randomly samples min_params number of input data elements from params array
+   * and chooses the model that has the largest set of supporting parameters
+   * (inliers) in the params array.
    *
-  * Point input[N_POINTS] = { .. };
-  * Line line;
-  * MyLineFittingRansac ransac(input);
+   * Note that this method always uses min_params number of parameters, that is,
+   * \e doEstimate method can be implemented to support only the minimum number
+   * of parameters unless \e refine method is used.
    *
-  * // assuming 60% of inliers, run RANSAC until the best model is found with 99% propability.
-  * int max_rounds = ransac.estimateRequiredRounds(0.99, 0.6);
-  * int number_of_inliers = ransac.estimate(input, N_POINTS, N_POINTS, max_rounds, &line);
+   * \param param_c Number of parameters available in estimation.
+   * \param support_limit The search is stopped if a model receives
+   * more support than this limit. 
+   * \param max_rounds How many different samples are tried before
+   * stopping the search.
+   * \param model The estimated model is stored here.
+   * \return the number of parameters supporting the model, or 0
+   * if a suitable model could not be built at all.
+   */
+  int estimate(int param_c, int support_limit, int max_rounds, MODEL* model)
+  {
+    return _estimate(param_c, support_limit, max_rounds, model);
+  }
+
+  /** \brief Iteratively makes the estimated model better.
    *
-  * // lets make the estimate even better.
-  * if (number_of_inliers > 0 && number_of_inliers < N_POINTS)
-  *   number_of_inliers = ransac.refine(input, N_POINTS, N_POINTS, 10, &line);
+   * Starting with the estimated model, computes the model from all
+   * inlier parameters and iterates until no new parameters support
+   * the model.
    *
-  * // you should keep track of the real percentage of inliers to determine
-  * // the required number of RANSAC rounds.
-  * double inlier_percentage = (double)number_of_inliers / (double)N_POINTS;
+   * Note that this method uses up to max_params number of parameters,
+   * that is, \e doEstimate method must be implemented in such a way
+   * that it can estimate a model from a variable number of parameters.
    *
-  * \endcode
+   * \param param_c Number of parameters available for estimation.
+   * \param support_limit The search is stopped if a model receives
+   * more support than this limit.
+   * \param max_rounds How many iterations of the refinement are run.
+   * \param model The estimated model that is refined.
+   * \param inlier_mask Byte array where 1 is stored for inliers and 0 for
+   * outliers.
+   * \return the number of parameters supporting the model.
    */
- template <typename MODEL>
- class IndexRansac : public RansacImpl {
-
-  protected:
-   /** \brief Creates a model estimate from a set of parameters.
-    *
-    * The user must implement this method to compute model parameters
-    * from the input data.
-    *
-    * \param params An array of indises of sampled parameters.
-    * \param param_c The number of parameter indises in the params array.
-    * \param model Pointer to the model where to store the estimate.
-    */
-   virtual void doEstimate(int* params, int param_c, MODEL* model) = 0;
-
-   /** \brief Computes how well a parameters supports a model.
-    *
-    * This method is used by the RANSAC algorithm to count how many
-    * parameters support the estimated model (inliers). Althought this
-    * is case specific, usually parameter supports the model when the distance
-    * from model prediction is not too far away from the parameter.
-    *
-    * \param param Index of the parameter to check.
-    * \param model Pointer to the model to check the parameter against.
-    * \return True when the parameter supports the model.
-    */
-   virtual bool doSupports(int param, MODEL* model) = 0;
-
-   /**
-    * Wrapper for templated parameters.
-    */
-   void _doEstimate(int* params, int param_c, void* model) {
-     doEstimate(params, param_c, (MODEL*)model);
-   }
-
-   /**
-    * Wrapper for templated parameters.
-    */
-   bool _doSupports(int param, void* model) {
-     return doSupports(param, (MODEL*)model);
-   }
-
-  public:
-   /** \brief Initialize the algorithm.
-    *
-    * Uses at least min_params and at most max_params number of input data
-    * elements for model estimation.
-    *
-    * Must be: max_params >= min_params
-    *
-    * \param min_params is the minimum number of parameters needed to create
-    * a model.
-    * \param max_params is the maximum number of parameters to using in refining
-    * the model. 
- */ - IndexRansac(int min_params, int max_params) - : RansacImpl(min_params, max_params, sizeof(MODEL)) {} - - virtual ~IndexRansac() {} - - /** \brief Estimates a model from input data parameters. - * - * Randomly samples min_params number of input data elements from params array - * and chooses the model that has the largest set of supporting parameters - * (inliers) in the params array. - * - * Note that this method always uses min_params number of parameters, that is, - * \e doEstimate method can be implemented to support only the minimum number - * of parameters unless \e refine method is used. - * - * \param param_c Number of parameters available in estimation. - * \param support_limit The search is stopped if a model receives - * more support that this limit. - * \param max_rounds How many different samples are tried before - * stopping the search. - * \param model The estimated model is stored here. - * \return the number of parameters supporting the model, or 0 - * if a suitable model could not be build at all. - */ - int estimate(int param_c, - int support_limit, int max_rounds, - MODEL* model) { - return _estimate(param_c, support_limit, max_rounds, model); - } - - /** \brief Iteratively makes the estimated model better. - * - * Starting with the estimated model, computes the model from all - * inlier parameters and interates until no new parameters support - * the model. - * - * Note that this method uses up to max_params number of parameters, - * that is, \e doEstimate method must be implemented in such a way - * that it can estimate a model from a variable number of parameters. - * - * \param param_c Number of parameters available for estimation. - * \param support_limit The search is stopped if a model receives - * more support that this limit. - * \param max_rounds How many iterations of the refinement are run. - * \param model The estimated model that is refined. - * \param inlier_mask Byte array where 1 is stored for inliers and 0 for outliers. - * \return the number of parameters supporting the model. - */ - int refine(int param_c, - int support_limit, int max_rounds, - MODEL* model, char *inlier_mask = NULL) { - return _refine(param_c, support_limit, max_rounds, model, inlier_mask); - } - - }; + int refine(int param_c, int support_limit, int max_rounds, MODEL* model, + char* inlier_mask = NULL) + { + return _refine(param_c, support_limit, max_rounds, model, inlier_mask); + } +}; -} // namespace alvar +} // namespace alvar -#endif //__Ransac_h__ +#endif //__Ransac_h__ diff --git a/ar_track_alvar/include/ar_track_alvar/Rotation.h b/ar_track_alvar/include/ar_track_alvar/Rotation.h index 62bde79..ab5c509 100644 --- a/ar_track_alvar/include/ar_track_alvar/Rotation.h +++ b/ar_track_alvar/include/ar_track_alvar/Rotation.h @@ -35,188 +35,198 @@ #include "Alvar.h" #include "Util.h" -namespace alvar { - +namespace alvar +{ /** - * \brief \e Rotation structure and transformations between different parameterizations. + * \brief \e Rotation structure and transformations between different + * parameterizations. */ class ALVAR_EXPORT Rotation { - -public: - CvMat quaternion_mat; - double quaternion[4]; - - /** - * \brief Rotation can be represented in four ways: quaternion (QUAT), matrix (MAT), euler angles (EUL) and exponential map (ROD). - */ - enum RotationType {QUAT, MAT, EUL, ROD}; - - Rotation(); - Rotation(const Rotation& r); - - /** - * \brief Constructor. - * \param data Rotation data stored in CvMat. With RotationType::MAT both 3x3 and 4x4 matrices are allowed. 
- * \param t Rotation type that corresponds to data. - */ - Rotation(CvMat *data, RotationType t); - - Rotation& operator = (const Rotation& p); - Rotation& operator += (const Rotation& v); - //Rotation& operator + () {return *this;} - - void Transpose(); - - /** - * \brief Simple function to mirror a rotation matrix in different directions. - * \param mat Matrix to be mirrored. - * \param x - * \param y - * \param z - */ - static void MirrorMat(CvMat *mat, bool x, bool y, bool z); - - /** - * \brief Mirrors the rotation in selected directions. - * \param x - * \param y - * \param z - */ - void Mirror(bool x, bool y, bool z); - - /** - * \brief Resets the rotation into identity. - */ - void Reset(); - - /** - * \brief Converts 3x3 rotation matrix into Rodriques representation. - * \param mat 3x3 rotation matrix. - * \param rod Resulting 3x1 rotation vector. - */ - static void Mat9ToRod(double *mat, double *rod); - - /** - * \brief Converts 3x1 rotation vector into 3x3 rotation matrix using Rodriques' formula. - * \param rod 3x1 rotation vector. - * \param Resulting 3x3 rotation matrix. - */ - static void RodToMat9(double *rod, double *mat); - - /** - * \brief Inverts unit quaternion. - * \param q Unit quaternion to be inverted. - * \param qi Resulting quaternion. - */ - static void QuatInv(const double *q, double *qi); - - /** - * \brief Normalizes a quaternion. - * \param q Quaternion to be normalized. - */ - static void QuatNorm(double *q); - - /** - * \brief Quaternion multiplication. - * \param q1 - * \param q2 - * \param q3 Resulting quaternion. - */ - static void QuatMul(const double *q1, const double *q2, double *q3); - - //% The quaternion has to be normalized!!! - /** - * \brief Converts a rotation described by a quaternion into 3x3 rotation matrix. - * \param quat Rotation in quaternion form. - * \param mat Corresponding 3x3 rotation matrix. - */ - static void QuatToMat9(const double *quat, double *mat); - - // TODO: Now we don't want to eliminate the translation part from the matrix here. Did this change break something??? - /** - * \brief Converts a rotation described by a quaternion into 4x4 OpenGL-like transformation matrix. The translation part is not altered. - * \param quat Rotation in quaternion form. - * \param mat Resulting 4x4 transformation matrix. - */ - static void QuatToMat16(const double *quat, double *mat); - - /** - * \brief Converts a rotation described by a quaternion into Euler angles. - * \param q Rotation in quaternion form. - * \param eul Resulting Euler angles. - */ - static void QuatToEul(const double *q, double *eul); - - /** - * \brief Converts a 3x3 rotation martix into quaternion form. - * \param mat 3x3 rotation matrix. - * \param quat Resulting quaternion. - */ - static void Mat9ToQuat(const double *mat, double *quat); - - /** - * \brief Converts a rotation described by Euler angles into quaternion form. - * \param eul Rotation in Euler angles. - * \param quat Resulting quaternion. - */ - static void EulToQuat(const double *eul, double *quat); - - /** - * \brief Sets the rotation from given quaternion. - * \param mat Input quaternion (4x1 CvMat). - */ - void SetQuaternion(CvMat *mat); - - /** - * \brief Sets the rotation from given quaternion. - * \param mat Input quaternion (4x1 double array). - */ - void SetQuaternion(const double *quat); - - /** - * \brief Sets the rotation from given Euler angles. - * \param mat Input Euler angles (3x1 CvMat). 
-  */
-  void SetEuler(const CvMat *mat);
-
-  /**
-   * \brief Sets the rotation from given rotation vector.
-   * \param mat Input rotation vector (3x1 CvMat).
-   */
-  void SetRodriques(const CvMat *mat);
-
-  /**
-   * \brief Sets the rotation from given rotation matrix. 3x3 and 4x4 matrices are allowed.
-   * \param mat Input rotation matrix (3x3 or 4x4 CvMat).
-   */
-  void SetMatrix(const CvMat *mat);
-
-  /**
-   * \brief Returns the rotation in matrix form. 3x3 and 4x4 matrices are allowed.
-   * \param mat The rotation is stored here.
-   */
-  void GetMatrix(CvMat *mat) const;
-
-  /**
-   * \brief Returns the rotation in rotation vector form.
-   * \param mat The rotation is stored here (3x1 CvMat).
-   */
-  void GetRodriques(CvMat *mat) const;
-
-  /**
-   * \brief Returns the rotation in Euler angles.
-   * \param mat The rotation is stored here (3x1 CvMat).
-   */
-  void GetEuler(CvMat *mat) const;
-
-  /**
-   * \brief Returns the rotation in quaternion form.
-   * \param mat The rotation is stored here (4x1 CvMat).
-   */
-  void GetQuaternion(CvMat *mat) const;
+public:
+  cv::Mat quaternion_mat;
+  double quaternion[4];
+
+  /**
+   * \brief Rotation can be represented in four ways: quaternion (QUAT), matrix
+   * (MAT), Euler angles (EUL) and exponential map (ROD).
+   */
+  enum RotationType
+  {
+    QUAT,
+    MAT,
+    EUL,
+    ROD
+  };
+
+  Rotation();
+  Rotation(const Rotation& r);
+
+  /**
+   * \brief Constructor.
+   * \param data Rotation data stored in cv::Mat. With RotationType::MAT both
+   * 3x3 and 4x4 matrices are allowed.
+   * \param t Rotation type that corresponds to data.
+   */
+  Rotation(const cv::Mat& data, RotationType t);
+
+  Rotation& operator=(const Rotation& p);
+  Rotation& operator+=(const Rotation& v);
+  // Rotation& operator + () {return *this;}
+
+  void Transpose();
+
+  /**
+   * \brief Simple function to mirror a rotation matrix in different directions.
+   * \param mat Matrix to be mirrored.
+   * \param x
+   * \param y
+   * \param z
+   */
+  static void MirrorMat(cv::Mat& mat, bool x, bool y, bool z);
+
+  /**
+   * \brief Mirrors the rotation in selected directions.
+   * \param x
+   * \param y
+   * \param z
+   */
+  void Mirror(bool x, bool y, bool z);
+
+  /**
+   * \brief Resets the rotation into identity.
+   */
+  void Reset();
+
+  /**
+   * \brief Converts 3x3 rotation matrix into Rodrigues representation.
+   * \param mat 3x3 rotation matrix.
+   * \param rod Resulting 3x1 rotation vector.
+   */
+  static void Mat9ToRod(double* mat, double* rod);
+
+  /**
+   * \brief Converts 3x1 rotation vector into 3x3 rotation matrix using
+   * Rodrigues' formula.
+   * \param rod 3x1 rotation vector.
+   * \param mat Resulting 3x3 rotation matrix.
+   */
+  static void RodToMat9(const double* rod, double* mat);
+
+  /**
+   * \brief Inverts unit quaternion.
+   * \param q Unit quaternion to be inverted.
+   * \param qi Resulting quaternion.
+   */
+  static void QuatInv(const double* q, double* qi);
+
+  /**
+   * \brief Normalizes a quaternion.
+   * \param q Quaternion to be normalized.
+   */
+  static void QuatNorm(double* q);
+
+  /**
+   * \brief Quaternion multiplication.
+   * \param q1
+   * \param q2
+   * \param q3 Resulting quaternion.
+   */
+  static void QuatMul(const double* q1, const double* q2, double* q3);
+
+  //% The quaternion has to be normalized!!!
+  /**
+   * \brief Converts a rotation described by a quaternion into 3x3 rotation
+   * matrix.
+   * \param quat Rotation in quaternion form.
+   * \param mat Corresponding 3x3 rotation matrix. 
+   */
+  static void QuatToMat9(const double* quat, double* mat);
+
+  // TODO: Now we don't want to eliminate the translation part from the matrix
+  // here. Did this change break something???
+  /**
+   * \brief Converts a rotation described by a quaternion into 4x4 OpenGL-like
+   * transformation matrix. The translation part is not altered.
+   * \param quat Rotation in quaternion form.
+   * \param mat Resulting 4x4 transformation matrix.
+   */
+  static void QuatToMat16(const double* quat, double* mat);
+
+  /**
+   * \brief Converts a rotation described by a quaternion into Euler angles.
+   * \param q Rotation in quaternion form.
+   * \param eul Resulting Euler angles.
+   */
+  static void QuatToEul(const double* q, double* eul);
+
+  /**
+   * \brief Converts a 3x3 rotation matrix into quaternion form.
+   * \param mat 3x3 rotation matrix.
+   * \param quat Resulting quaternion.
+   */
+  static void Mat9ToQuat(const double* mat, double* quat);
+
+  /**
+   * \brief Converts a rotation described by Euler angles into quaternion form.
+   * \param eul Rotation in Euler angles.
+   * \param quat Resulting quaternion.
+   */
+  static void EulToQuat(const double* eul, double* quat);
+
+  /**
+   * \brief Sets the rotation from given quaternion.
+   * \param mat Input quaternion (4x1 cv::Mat).
+   */
+  void SetQuaternion(const cv::Mat& mat);
+
+  /**
+   * \brief Sets the rotation from given quaternion.
+   * \param mat Input quaternion (4x1 double array).
+   */
+  void SetQuaternion(const double* quat);
+
+  /**
+   * \brief Sets the rotation from given Euler angles.
+   * \param mat Input Euler angles (3x1 cv::Mat).
+   */
+  void SetEuler(const cv::Mat& mat);
+
+  /**
+   * \brief Sets the rotation from given rotation vector.
+   * \param mat Input rotation vector (3x1 cv::Mat).
+   */
+  void SetRodriques(const cv::Mat& mat);
+
+  /**
+   * \brief Sets the rotation from given rotation matrix. 3x3 and 4x4 matrices
+   * are allowed.
+   * \param mat Input rotation matrix (3x3 or 4x4 cv::Mat).
+   */
+  void SetMatrix(const cv::Mat& mat);
+
+  /**
+   * \brief Returns the rotation in matrix form. 3x3 and 4x4 matrices are
+   * allowed.
+   * \param mat The rotation is stored here.
+   */
+  void GetMatrix(cv::Mat& mat) const;
+
+  /**
+   * \brief Returns the rotation in rotation vector form.
+   * \param mat The rotation is stored here (3x1 cv::Mat).
+   */
+  void GetRodriques(cv::Mat& mat) const;
+
+  /**
+   * \brief Returns the rotation in Euler angles.
+   * \param mat The rotation is stored here (3x1 cv::Mat).
+   */
+  void GetEuler(cv::Mat& mat) const;
+
+  /**
+   * \brief Returns the rotation in quaternion form.
+   * \param mat The rotation is stored here (4x1 cv::Mat).
+   */
+  void GetQuaternion(cv::Mat& mat) const;
 };
 
-} // namespace alvar
+}  // namespace alvar
 
 #endif
diff --git a/ar_track_alvar/include/ar_track_alvar/SfM.h b/ar_track_alvar/include/ar_track_alvar/SfM.h
index f36fa62..47808ec 100644
--- a/ar_track_alvar/include/ar_track_alvar/SfM.h
+++ b/ar_track_alvar/include/ar_track_alvar/SfM.h
@@ -32,92 +32,121 @@
 
 #include "EC.h"
 
-namespace alvar {
-
+namespace alvar
+{
-/** \brief Simple structure from motion implementation using \e CameraEC , \e MarkerDetectorEC and \e TrackerFeaturesEC
+/** \brief Simple structure from motion implementation using \e CameraEC, \e
+ * MarkerDetectorEC and \e TrackerFeaturesEC
  *
  * See \e SamplePointcloud for usage example. 
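Since every Rotation accessor now takes cv::Mat instead of CvMat*, round-tripping a rotation through the class looks like this; the CV_64F element type and the axis-angle values are assumptions consistent with the 3x1/4x1 shapes documented above:

```cpp
#include <opencv2/core.hpp>

#include "ar_track_alvar/Rotation.h"

int main()
{
  alvar::Rotation rot;

  // 90 degrees about the z axis as a 3x1 Rodrigues vector.
  cv::Mat rod = (cv::Mat_<double>(3, 1) << 0.0, 0.0, CV_PI / 2.0);
  rot.SetRodriques(rod);

  // Read the same rotation back as a 3x3 matrix and a 4x1 quaternion.
  cv::Mat mat = cv::Mat::zeros(3, 3, CV_64F);
  rot.GetMatrix(mat);

  cv::Mat quat = cv::Mat::zeros(4, 1, CV_64F);
  rot.GetQuaternion(quat);
  return 0;
}
```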
 */
-class ALVAR_EXPORT SimpleSfM {
+class ALVAR_EXPORT SimpleSfM
+{
 public:
-  /** \brief Extended version of \e ExternalContainer structure used internally in \e SimpleSfM */
-  class Feature : public ExternalContainer {
-  public:
-    bool has_stored_pose;
-    Pose pose1;
-    CvPoint2D32f p2d1;
-    CvPoint3D32f p3d_sh;
-    CvPoint2D32f projected_p2d_sh;
-    int tri_cntr; // This is needed only by UpdateTriangulateOnly
-    int estimation_type;
+  /** \brief Extended version of \e ExternalContainer structure used internally
+   * in \e SimpleSfM */
+  class Feature : public ExternalContainer
+  {
+  public:
+    bool has_stored_pose;
+    Pose pose1;
+    CvPoint2D32f p2d1;
+    CvPoint3D32f p3d_sh;
+    CvPoint2D32f projected_p2d_sh;
+    int tri_cntr;  // This is needed only by UpdateTriangulateOnly
+    int estimation_type;
 
-    Feature() : ExternalContainer() {
-      has_stored_pose = false;
-      p3d_sh.x = 0.f; p3d_sh.y = 0.f; p3d_sh.z = 0.f;
-      tri_cntr = 0;
-      estimation_type = 0;
-    }
-    Feature(const Feature &c) : ExternalContainer(c) {
-      has_stored_pose = c.has_stored_pose;
-      pose1 = c.pose1;
-      p2d1 = c.p2d1;
-      p3d_sh = c.p3d_sh;
-      projected_p2d_sh = c.projected_p2d_sh;
-      tri_cntr = c.tri_cntr;
-      estimation_type = c.estimation_type;
-    }
-  };
-  /** \brief The map of all tracked features */
-  std::map<int, Feature> container;
-  std::map<int, Feature> container_triangulated;
+    Feature() : ExternalContainer()
+    {
+      has_stored_pose = false;
+      p3d_sh.x = 0.f;
+      p3d_sh.y = 0.f;
+      p3d_sh.z = 0.f;
+      tri_cntr = 0;
+      estimation_type = 0;
+    }
+    Feature(const Feature& c) : ExternalContainer(c)
+    {
+      has_stored_pose = c.has_stored_pose;
+      pose1 = c.pose1;
+      p2d1 = c.p2d1;
+      p3d_sh = c.p3d_sh;
+      projected_p2d_sh = c.projected_p2d_sh;
+      tri_cntr = c.tri_cntr;
+      estimation_type = c.estimation_type;
+    }
+  };
+  /** \brief The map of all tracked features */
+  std::map<int, Feature> container;
+  std::map<int, Feature> container_triangulated;
 
 protected:
-  std::map<int, Feature> container_reset_point;
-  std::map<int, Feature> container_triangulated_reset_point;
+  std::map<int, Feature> container_reset_point;
+  std::map<int, Feature> container_triangulated_reset_point;
 
-  CameraEC cam;
-  MarkerDetectorEC<MarkerData> marker_detector;
-  TrackerFeaturesEC tf;
-  Pose pose;
-  bool pose_ok;
+  CameraEC cam;
+  MarkerDetectorEC<MarkerData> marker_detector;
+  TrackerFeaturesEC tf;
+  Pose pose;
+  bool pose_ok;
 
-  bool update_tri;
-  std::string multi_marker_file;
-  bool markers_found;
-  double scale;
-  Pose pose_original;
-  Pose pose_difference;
+  bool update_tri;
+  std::string multi_marker_file;
+  bool markers_found;
+  double scale;
+  Pose pose_original;
+  Pose pose_difference;
 
 public:
-  /** \brief Constructor */
-  SimpleSfM() : tf(200, 200, 0.01, 20, 4, 6), pose_ok(false), update_tri(false), markers_found(false) {}
-  /** \brief Reset the situation back to the point it was when \e SetResetPoint was called */
-  void Reset(bool reset_also_triangulated = true);
-  /** \brief Remember the current state and return here when the \e Reset is called */
-  void SetResetPoint();
-  /** \brief Clear all tracked features */
-  void Clear();
-  /** \brief Set the suitable scale to be used. This affects quite much how the tracking behaves (when features are triangulated etc.) */
-  void SetScale(double s) { scale = s; }
-  /** \brief Get the camera used internally. You need to use this to set correct camera calibration (see \e SamplePointcloud) */
-  CameraEC *GetCamera();
-  /** \brief Get the estimated pose */
-  Pose *GetPose();
-  /** \brief Add \e MultiMarker from file as a basis for tracking. It is good idea to call \e SetResetPoint after these. 
+  /** \brief Constructor */
+  SimpleSfM()
+    : tf(200, 200, 0.01, 20, 4, 6)
+    , pose_ok(false)
+    , update_tri(false)
+    , markers_found(false)
+  {
+  }
+  /** \brief Reset the situation back to the state it was in when \e
+   * SetResetPoint was called */
+  void Reset(bool reset_also_triangulated = true);
+  /** \brief Remember the current state and return here when the \e Reset is
+   * called */
+  void SetResetPoint();
+  /** \brief Clear all tracked features */
+  void Clear();
+  /** \brief Set the suitable scale to be used. This strongly affects how the
+   * tracking behaves (when features are triangulated etc.) */
+  void SetScale(double s)
+  {
+    scale = s;
+  }
+  /** \brief Get the camera used internally. You need to use this to set the
+   * correct camera calibration (see \e SamplePointcloud) */
+  CameraEC* GetCamera();
+  /** \brief Get the estimated pose */
+  Pose* GetPose();
+  /** \brief Add \e MultiMarker from file as a basis for tracking. It is a good
+   * idea to call \e SetResetPoint after these. 
 */
-  bool AddMultiMarker(const char *fname, FILE_FORMAT format = FILE_FORMAT_XML);
-  /** \brief Add \e MultiMarker to be a basis for tracking. It is good idea to call \e SetResetPoint after these. */
-  bool AddMultiMarker(MultiMarkerEC *mm);
-  /** \brief Add an marker to be a basis for tracking. It is good idea to call \e SetResetPoint after these. */
-  void AddMarker(int marker_id, double edge_length, Pose &pose);
-  /** \brief Update position assuming that camera is moving with 6-DOF */
-  bool Update(IplImage *image, bool assume_plane=true, bool triangulate=true, float reproj_err_limit=5.f, float triangulate_angle=15.f);
-  /** \brief Update camera 6-DOF position using triangulated points only (This is the old version of Update) */
-  bool UpdateTriangulateOnly(IplImage *image);
-  /** \brief Update position assuming that user is more standing still and viewing the environment. */
-  bool UpdateRotationsOnly(IplImage *image);
-  /** \brief Draw debug information about the tracked features and detected markers. */
-  void Draw(IplImage *rgba);
+  bool AddMultiMarker(const char* fname, FILE_FORMAT format = FILE_FORMAT_XML);
+  /** \brief Add \e MultiMarker to be a basis for tracking. It is a good idea to
+   * call \e SetResetPoint after these. */
+  bool AddMultiMarker(MultiMarkerEC* mm);
+  /** \brief Add a marker to be a basis for tracking. It is a good idea to call
+   * \e SetResetPoint after these. */
+  void AddMarker(int marker_id, double edge_length, Pose& pose);
+  /** \brief Update position assuming that the camera is moving with 6-DOF */
+  bool Update(IplImage* image, bool assume_plane = true,
+              bool triangulate = true, float reproj_err_limit = 5.f,
+              float triangulate_angle = 15.f);
+  /** \brief Update camera 6-DOF position using triangulated points only (This
+   * is the old version of Update) */
+  bool UpdateTriangulateOnly(IplImage* image);
+  /** \brief Update position assuming that the user is mostly standing still and
+   * viewing the environment. */
+  bool UpdateRotationsOnly(IplImage* image);
+  /** \brief Draw debug information about the tracked features and detected
+   * markers. 
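A rough frame-by-frame driver for SimpleSfM; only the method calls are from this header, while the file names, calibration step, and capture loop are placeholders:

```cpp
#include "ar_track_alvar/SfM.h"

void processFrame(alvar::SimpleSfM& sfm, IplImage* frame)
{
  // One-time setup (sketch): calibrate the internal camera via GetCamera()
  // and load a multimarker configuration, e.g.
  //   sfm.AddMultiMarker("mmarker.xml");  // placeholder file name
  //   sfm.SetResetPoint();

  // Update() returns true while the 6-DOF pose estimate is valid.
  if (sfm.Update(frame))
  {
    alvar::Pose* pose = sfm.GetPose();
    (void)pose;  // use the pose, e.g. publish or visualize it
  }
}
```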
*/ + void Draw(IplImage* rgba); }; -} +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Shared.h b/ar_track_alvar/include/ar_track_alvar/Shared.h old mode 100755 new mode 100644 index 34335da..0a28bb1 --- a/ar_track_alvar/include/ar_track_alvar/Shared.h +++ b/ar_track_alvar/include/ar_track_alvar/Shared.h @@ -5,47 +5,56 @@ using namespace alvar; -void outputEnumeratedPlugins(CaptureFactory::CapturePluginVector &plugins) +void outputEnumeratedPlugins(CaptureFactory::CapturePluginVector& plugins) { - for (int i = 0; i < (int)plugins.size(); ++i) { - if (i != 0) { - std::cout << ", "; - } - std::cout << plugins.at(i); + for (int i = 0; i < (int)plugins.size(); ++i) + { + if (i != 0) + { + std::cout << ", "; } + std::cout << plugins.at(i); + } - std::cout << std::endl; + std::cout << std::endl; } -void outputEnumeratedDevices(CaptureFactory::CaptureDeviceVector &devices, int selectedDevice) +void outputEnumeratedDevices(CaptureFactory::CaptureDeviceVector& devices, + int selectedDevice) { - for (int i = 0; i < (int)devices.size(); ++i) { - if (selectedDevice == i) { - std::cout << "* "; - } - else { - std::cout << " "; - } - - std::cout << i << ": " << devices.at(i).uniqueName(); + for (int i = 0; i < (int)devices.size(); ++i) + { + if (selectedDevice == i) + { + std::cout << "* "; + } + else + { + std::cout << " "; + } - if (devices[i].description().length() > 0) { - std::cout << ", " << devices.at(i).description(); - } + std::cout << i << ": " << devices.at(i).uniqueName(); - std::cout << std::endl; + if (devices[i].description().length() > 0) + { + std::cout << ", " << devices.at(i).description(); } + + std::cout << std::endl; + } } -int defaultDevice(CaptureFactory::CaptureDeviceVector &devices) +int defaultDevice(CaptureFactory::CaptureDeviceVector& devices) { - for (int i = 0; i < (int)devices.size(); ++i) { - if (devices.at(i).captureType() == "highgui") { - return i; - } + for (int i = 0; i < (int)devices.size(); ++i) + { + if (devices.at(i).captureType() == "highgui") + { + return i; } + } - return 0; + return 0; } #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Threads.h b/ar_track_alvar/include/ar_track_alvar/Threads.h index 376a24e..1d330ca 100644 --- a/ar_track_alvar/include/ar_track_alvar/Threads.h +++ b/ar_track_alvar/include/ar_track_alvar/Threads.h @@ -32,8 +32,8 @@ #include "Alvar.h" -namespace alvar { - +namespace alvar +{ class ThreadsPrivate; /** @@ -44,28 +44,28 @@ class ThreadsPrivate; class ALVAR_EXPORT Threads { public: - /** - * \brief Constructor. - */ - Threads(); + /** + * \brief Constructor. + */ + Threads(); - /** - * \brief Destructor. - */ - ~Threads(); + /** + * \brief Destructor. + */ + ~Threads(); - /** - * \brief Creates a new thread and returns true on success. - * - * \param method The method that the thread will execute. - * \param parameters The parameters sent to the method. - */ - bool create(void *(*method)(void *), void *parameters); + /** + * \brief Creates a new thread and returns true on success. + * + * \param method The method that the thread will execute. + * \param parameters The parameters sent to the method. 
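A minimal use of the Threads wrapper declared above; the worker body is illustrative:

```cpp
#include <cstdio>

#include "ar_track_alvar/Threads.h"

// Worker with the signature expected by Threads::create.
static void* worker(void* parameters)
{
  int* value = (int*)parameters;
  std::printf("worker received %d\n", *value);
  return NULL;
}

int main()
{
  alvar::Threads threads;
  static int value = 42;  // must outlive the worker thread

  // create() returns true when the thread was started successfully.
  if (!threads.create(worker, &value))
    std::printf("failed to start thread\n");
  return 0;
}
```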
+ */ + bool create(void* (*method)(void*), void* parameters); private: - ThreadsPrivate *d; + ThreadsPrivate* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Threads_private.h b/ar_track_alvar/include/ar_track_alvar/Threads_private.h index 11bae01..e2c0509 100644 --- a/ar_track_alvar/include/ar_track_alvar/Threads_private.h +++ b/ar_track_alvar/include/ar_track_alvar/Threads_private.h @@ -24,20 +24,20 @@ #ifndef THREADS_PRIVATE_H #define THREADS_PRIVATE_H -namespace alvar { - +namespace alvar +{ class ThreadsPrivateData; class ThreadsPrivate { public: - ThreadsPrivate(); - ~ThreadsPrivate(); - bool create(void *(*method)(void *), void *parameters); + ThreadsPrivate(); + ~ThreadsPrivate(); + bool create(void* (*method)(void*), void* parameters); - ThreadsPrivateData *d; + ThreadsPrivateData* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Timer.h b/ar_track_alvar/include/ar_track_alvar/Timer.h index d7c145f..0398079 100644 --- a/ar_track_alvar/include/ar_track_alvar/Timer.h +++ b/ar_track_alvar/include/ar_track_alvar/Timer.h @@ -32,8 +32,8 @@ #include "Alvar.h" -namespace alvar { - +namespace alvar +{ class TimerPrivate; /** @@ -44,30 +44,30 @@ class TimerPrivate; class ALVAR_EXPORT Timer { public: - /** - * \brief Constructor. - */ - Timer(); + /** + * \brief Constructor. + */ + Timer(); - /** - * \brief Destructor. - */ - ~Timer(); + /** + * \brief Destructor. + */ + ~Timer(); - /** - * \brief Starts the timer. - */ - void start(); + /** + * \brief Starts the timer. + */ + void start(); - /** - * \brief Stops the timer and returns the elapsed time in seconds. - */ - double stop(); + /** + * \brief Stops the timer and returns the elapsed time in seconds. + */ + double stop(); private: - TimerPrivate *d; + TimerPrivate* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Timer_private.h b/ar_track_alvar/include/ar_track_alvar/Timer_private.h index 5874b62..bb87cfd 100644 --- a/ar_track_alvar/include/ar_track_alvar/Timer_private.h +++ b/ar_track_alvar/include/ar_track_alvar/Timer_private.h @@ -24,21 +24,21 @@ #ifndef TIMER_PRIVATE_H #define TIMER_PRIVATE_H -namespace alvar { - +namespace alvar +{ class TimerPrivateData; class TimerPrivate { public: - TimerPrivate(); - ~TimerPrivate(); - void start(); - double stop(); + TimerPrivate(); + ~TimerPrivate(); + void start(); + double stop(); - TimerPrivateData *d; + TimerPrivateData* d; }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/Tracker.h b/ar_track_alvar/include/ar_track_alvar/Tracker.h index 50829b1..92abfe2 100644 --- a/ar_track_alvar/include/ar_track_alvar/Tracker.h +++ b/ar_track_alvar/include/ar_track_alvar/Tracker.h @@ -33,22 +33,29 @@ #include #include "Alvar.h" -namespace alvar { - +namespace alvar +{ /** * \brief Pure virtual base class for tracking optical flow * - * The idea is to make own versions of \e Track method which updates the class member variables accordingly + * The idea is to make own versions of \e Track method which updates the class + * member variables accordingly */ -class ALVAR_EXPORT Tracker { +class ALVAR_EXPORT Tracker +{ public: - Tracker() {} - /** \brief Pure virtual function for making the next track step. 
This analyses the image and updates class member variables accordingly */ - virtual double Track(IplImage *img) = 0; - - virtual void Compensate(double *x, double *y) {} + Tracker() + { + } + /** \brief Pure virtual function for making the next track step. This analyses + * the image and updates class member variables accordingly */ + virtual double Track(IplImage* img) = 0; + + virtual void Compensate(double* x, double* y) + { + } }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/TrackerFeatures.h b/ar_track_alvar/include/ar_track_alvar/TrackerFeatures.h index 106f2c9..6cbcb09 100644 --- a/ar_track_alvar/include/ar_track_alvar/TrackerFeatures.h +++ b/ar_track_alvar/include/ar_track_alvar/TrackerFeatures.h @@ -32,85 +32,102 @@ #include "Tracker.h" -namespace alvar { - +namespace alvar +{ /** - * \brief \e TrackerFeatures tracks features using OpenCV's cvGoodFeaturesToTrack and cvCalcOpticalFlowPyrLK + * \brief \e TrackerFeatures tracks features using OpenCV's + * cvGoodFeaturesToTrack and cvCalcOpticalFlowPyrLK */ -class ALVAR_EXPORT TrackerFeatures : public Tracker { +class ALVAR_EXPORT TrackerFeatures : public Tracker +{ protected: - int x_res, y_res; - int frame_count; - double quality_level; - double min_distance; - int min_features; - int max_features; - char *status; - IplImage *img_eig; - IplImage *img_tmp; - IplImage *gray; - IplImage *prev_gray; - IplImage *pyramid; - IplImage *prev_pyramid; - IplImage *mask; - int next_id; - int win_size; - int pyr_levels; + int x_res, y_res; + int frame_count; + double quality_level; + double min_distance; + int min_features; + int max_features; + char* status; + IplImage* img_eig; + IplImage* img_tmp; + IplImage* gray; + IplImage* prev_gray; + IplImage* pyramid; + IplImage* prev_pyramid; + IplImage* mask; + int next_id; + int win_size; + int pyr_levels; + + /** \brief Reset track features on specified mask area */ + double TrackHid(IplImage* img, IplImage* mask = NULL, + bool add_features = true); - /** \brief Reset track features on specified mask area */ - double TrackHid(IplImage *img, IplImage *mask=NULL, bool add_features=true); public: - /** \brief \e Track result: previous features */ - CvPoint2D32f *prev_features; - /** \brief \e Track result: current features */ - CvPoint2D32f *features; - /** \brief \e Track result: count of previous features */ - int prev_feature_count; - /** \brief \e Track result: count of current features */ - int feature_count; - /** \brief \e Track result: ID:s for previous features */ - int *prev_ids; - /** \brief \e Track result: ID:s for current features */ - int *ids; - /** - * \brief Constructor for \e TrackerFeatures tracks features using OpenCV's cvGoodFeaturesToTrack and cvCalcOpticalFlowPyrLK - * \param _max_features The maximum amount of features to be tracked - * \param _min_features The minimum amount of features. The featureset is filled up when the number of features is lower than this. - * \param _quality_level Multiplier for the maxmin eigenvalue; specifies minimal accepted quality of image corners. - * \param _min_distance Limit, specifying minimum possible distance between returned corners; Euclidian distance is used. 
- * If 0 given we use default value aiming for uniform cover: _min_distance = 0.8*sqrt(x_res*y_res/max_features)) - * \param _pyr_levels Number of pyramid levels - */ - TrackerFeatures(int _max_features=100, int _min_features=90, double _quality_level=0.01, double _min_distance=10, int _pyr_levels=1, int _win_size=3); - /** \brief Destructor */ - ~TrackerFeatures(); - /** \brief Change settings while running */ - void ChangeSettings(int _max_features=100, int _min_features=90, double _quality_level=0.01, double _min_distance=10); - /** \brief Reset */ - void Reset(); - /** \brief Reset track features on specified mask area */ - double Reset(IplImage *img, IplImage *mask); - /** \brief Stop tracking the identified feature (with index for features array)*/ - bool DelFeature(int index); - /** \brief Stop tracking the identified feature (with feature id) */ - bool DelFeatureId(int id); - /** \brief Track features */ - double Track(IplImage *img) { return Track(img, true); } - /** \brief Track features */ - double Track(IplImage *img, bool add_features); - /** \brief Track features */ - double Track(IplImage* img, IplImage* mask); - /** \brief add features to the previously tracked frame if there are less than min_features */ - int AddFeatures(IplImage *mask=NULL); - /** \brief Create and get the pointer to new_features_mask */ - IplImage *NewFeatureMask(); - /** \brief Purge features that are considerably closer than the defined min_distance. - * - * Note, that we always try to maintain the smaller id's assuming that they are older ones - */ - int Purge(); + /** \brief \e Track result: previous features */ + CvPoint2D32f* prev_features; + /** \brief \e Track result: current features */ + CvPoint2D32f* features; + /** \brief \e Track result: count of previous features */ + int prev_feature_count; + /** \brief \e Track result: count of current features */ + int feature_count; + /** \brief \e Track result: ID:s for previous features */ + int* prev_ids; + /** \brief \e Track result: ID:s for current features */ + int* ids; + /** + * \brief Constructor for \e TrackerFeatures tracks features using OpenCV's + * cvGoodFeaturesToTrack and cvCalcOpticalFlowPyrLK \param _max_features The + * maximum amount of features to be tracked \param _min_features The minimum + * amount of features. The featureset is filled up when the number of features + * is lower than this. \param _quality_level Multiplier for the maxmin + * eigenvalue; specifies minimal accepted quality of image corners. \param + * _min_distance Limit, specifying minimum possible distance between returned + * corners; Euclidian distance is used. 
If 0 given we use default value aiming + * for uniform cover: _min_distance = 0.8*sqrt(x_res*y_res/max_features)) + * \param _pyr_levels Number of pyramid levels + */ + TrackerFeatures(int _max_features = 100, int _min_features = 90, + double _quality_level = 0.01, double _min_distance = 10, + int _pyr_levels = 1, int _win_size = 3); + /** \brief Destructor */ + ~TrackerFeatures(); + /** \brief Change settings while running */ + void ChangeSettings(int _max_features = 100, int _min_features = 90, + double _quality_level = 0.01, double _min_distance = 10); + /** \brief Reset */ + void Reset(); + /** \brief Reset track features on specified mask area */ + double Reset(IplImage* img, IplImage* mask); + /** \brief Stop tracking the identified feature (with index for features + * array)*/ + bool DelFeature(int index); + /** \brief Stop tracking the identified feature (with feature id) */ + bool DelFeatureId(int id); + /** \brief Track features */ + double Track(IplImage* img) + { + return Track(img, true); + } + /** \brief Track features */ + double Track(IplImage* img, bool add_features); + /** \brief Track features */ + double Track(IplImage* img, IplImage* mask); + /** \brief add features to the previously tracked frame if there are less than + * min_features */ + int AddFeatures(IplImage* mask = NULL); + /** \brief Create and get the pointer to new_features_mask */ + IplImage* NewFeatureMask(); + /** \brief Purge features that are considerably closer than the defined + * min_distance. + * + * Note, that we always try to maintain the smaller id's assuming that they + * are older ones + */ + int Purge(); }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/TrackerOrientation.h b/ar_track_alvar/include/ar_track_alvar/TrackerOrientation.h index 31a5a8b..4a39c67 100644 --- a/ar_track_alvar/include/ar_track_alvar/TrackerOrientation.h +++ b/ar_track_alvar/include/ar_track_alvar/TrackerOrientation.h @@ -35,85 +35,96 @@ * \brief This file implements an orientation tracker. */ -namespace alvar { - +namespace alvar +{ /** * \brief Track orientation based only on features in the image plane. 
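TrackerFeatures keeps the classic two-stage optical-flow recipe: pick corners with cvGoodFeaturesToTrack, follow them with pyramidal Lucas-Kanade, and top the set back up whenever it drops below `_min_features`. A minimal sketch of the same pipeline against the modern `cv::` API follows; the frame file names and constants are illustrative only, and the `min_distance` fallback mirrors the `0.8*sqrt(x_res*y_res/max_features)` default quoted above.

```
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include <cmath>
#include <iostream>
#include <vector>

int main()
{
  // Hypothetical input frames; any two consecutive grayscale images work.
  cv::Mat prev = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
  cv::Mat next = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);
  if (prev.empty() || next.empty())
    return 1;

  const int max_features = 100;
  // The uniform-cover spacing TrackerFeatures uses when _min_distance is 0.
  const double min_distance =
      0.8 * std::sqrt(double(prev.cols * prev.rows) / max_features);

  std::vector<cv::Point2f> prev_pts, next_pts;
  cv::goodFeaturesToTrack(prev, prev_pts, max_features,
                          /*qualityLevel=*/0.01, min_distance);

  // Pyramidal LK; status[i] != 0 means feature i survived the step.
  std::vector<unsigned char> status;
  std::vector<float> err;
  cv::calcOpticalFlowPyrLK(prev, next, prev_pts, next_pts, status, err);

  int tracked = 0;
  for (unsigned char s : status)
    tracked += s;
  std::cout << tracked << " of " << prev_pts.size() << " features tracked\n";
  return 0;
}
```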
*/ -class ALVAR_EXPORT TrackerOrientation : public Tracker { - +class ALVAR_EXPORT TrackerOrientation : public Tracker +{ public: - TrackerOrientation(int width, int height, int image_scale=1, int outlier_limit=20) - : _image_scale(image_scale) - , _outlier_limit(outlier_limit) - , _xres(width) - , _yres(height) - { - _camera = 0; - _grsc = 0; - _object_model = 0; - } + TrackerOrientation(int width, int height, int image_scale = 1, + int outlier_limit = 20) + : _image_scale(image_scale) + , _outlier_limit(outlier_limit) + , _xres(width) + , _yres(height) + { + _camera = 0; + _grsc = 0; + _object_model = 0; + } private: - - struct Feature - { - enum {NOT_TRACKED=0, IS_TRACKED} status2D; - enum {NONE=0, USE_FOR_POSE, IS_OUTLIER, IS_INITIAL} status3D; - - Feature() - { - status3D = NONE; - status2D = NOT_TRACKED; - } - - Feature(double vx, double vy) - { - point.x = float(vx); - point.y = float(vy); - status3D = NONE; - status2D = NOT_TRACKED; - } - - ~Feature() {} - - CvPoint2D32f point; - CvPoint3D64f point3d; - }; - - TrackerFeatures _ft; - std::map _F_v; - - int _xres; - int _yres; - int _image_scale; - int _outlier_limit; - - Pose _pose; - IplImage *_grsc; - Camera *_camera; - CvMat *_object_model; + struct Feature + { + enum + { + NOT_TRACKED = 0, + IS_TRACKED + } status2D; + enum + { + NONE = 0, + USE_FOR_POSE, + IS_OUTLIER, + IS_INITIAL + } status3D; + + Feature() + { + status3D = NONE; + status2D = NOT_TRACKED; + } + + Feature(double vx, double vy) + { + point.x = float(vx); + point.y = float(vy); + status3D = NONE; + status2D = NOT_TRACKED; + } + + ~Feature() + { + } + + CvPoint2D32f point; + CvPoint3D64f point3d; + }; + + TrackerFeatures _ft; + std::map _F_v; + + int _xres; + int _yres; + int _image_scale; + int _outlier_limit; + + Pose _pose; + IplImage* _grsc; + Camera* _camera; + CvMat* _object_model; public: - void SetCamera(Camera *camera) { - _camera = camera; - } - void GetPose(Pose &pose); - void GetPose(double gl_mat[16]) { - _pose.GetMatrixGL(gl_mat); - } - void Reset(); - double Track(IplImage *image); + void SetCamera(Camera* camera) + { + _camera = camera; + } + void GetPose(Pose& pose); + void GetPose(double gl_mat[16]) + { + _pose.GetMatrixGL(gl_mat); + } + void Reset(); + double Track(IplImage* image); private: - static void Project(CvMat* state, CvMat* projection, void *param); - bool UpdatePose(IplImage* image=0); - bool UpdateRotationOnly(IplImage *gray, IplImage *image=0); - + static void Project(CvMat* state, CvMat* projection, void* param); + bool UpdatePose(IplImage* image = 0); + bool UpdateRotationOnly(IplImage* gray, IplImage* image = 0); }; -} // namespace alvar +} // namespace alvar #endif - - diff --git a/ar_track_alvar/include/ar_track_alvar/TrackerPsa.h b/ar_track_alvar/include/ar_track_alvar/TrackerPsa.h index 4b47574..6ecfe5e 100644 --- a/ar_track_alvar/include/ar_track_alvar/TrackerPsa.h +++ b/ar_track_alvar/include/ar_track_alvar/TrackerPsa.h @@ -32,59 +32,61 @@ * \brief This file implements a PSA tracker. */ -namespace alvar { - +namespace alvar +{ /** - * \brief \e TrackerPsa implements a very simple PSA tracker + * \brief \e TrackerPsa implements a very simple PSA tracker * - * (See Drab, Stephan A. & Artner, Nicole M. "Motion Detection as Interaction Technique for Games & Applications on Mobile Devices" PERMID 2005) + * (See Drab, Stephan A. & Artner, Nicole M. 
"Motion Detection as Interaction + * Technique for Games & Applications on Mobile Devices" PERMID 2005) */ -class ALVAR_EXPORT TrackerPsa : public Tracker { +class ALVAR_EXPORT TrackerPsa : public Tracker +{ protected: - int max_shift; - int x_res, y_res; - long *hor, *horprev; - long *ver, *verprev; - long framecount; + int max_shift; + int x_res, y_res; + long *hor, *horprev; + long *ver, *verprev; + long framecount; public: - /** \brief \e Track result x-translation in pixels */ - double xd; - /** \brief \e Track result y-translation in pixels */ - double yd; - /** \brief Constructor */ - TrackerPsa(int _max_shift = 50); - /** \brief Destructor */ - ~TrackerPsa(); - /** \brief Track using PSA */ - double Track(IplImage *img); + /** \brief \e Track result x-translation in pixels */ + double xd; + /** \brief \e Track result y-translation in pixels */ + double yd; + /** \brief Constructor */ + TrackerPsa(int _max_shift = 50); + /** \brief Destructor */ + ~TrackerPsa(); + /** \brief Track using PSA */ + double Track(IplImage* img); - virtual void Compensate(double *x, double *y); + virtual void Compensate(double* x, double* y); }; /** - * \brief \e TrackerPsaRot implements a slightly extended version of a \e TrackerPsa which can also detect sideways rotation + * \brief \e TrackerPsaRot implements a slightly extended version of a \e + * TrackerPsa which can also detect sideways rotation */ -class ALVAR_EXPORT TrackerPsaRot : public TrackerPsa { +class ALVAR_EXPORT TrackerPsaRot : public TrackerPsa +{ protected: - double *rot, *rotprev; - int *rot_count; + double *rot, *rotprev; + int* rot_count; public: - /** \brief \e Track result rotation in degrees */ - double rotd; - /** \brief Constructor */ - TrackerPsaRot(int _max_shift = 50); - /** \brief Destructor */ - ~TrackerPsaRot(); - /** \brief Track using PSA with rotation*/ - double Track(IplImage *img); + /** \brief \e Track result rotation in degrees */ + double rotd; + /** \brief Constructor */ + TrackerPsaRot(int _max_shift = 50); + /** \brief Destructor */ + ~TrackerPsaRot(); + /** \brief Track using PSA with rotation*/ + double Track(IplImage* img); - virtual void Compensate(double *x, double *y); + virtual void Compensate(double* x, double* y); }; -} // namespace alvar +} // namespace alvar #endif - - diff --git a/ar_track_alvar/include/ar_track_alvar/TrackerStat.h b/ar_track_alvar/include/ar_track_alvar/TrackerStat.h index 53d54d0..fba855a 100644 --- a/ar_track_alvar/include/ar_track_alvar/TrackerStat.h +++ b/ar_track_alvar/include/ar_track_alvar/TrackerStat.h @@ -34,51 +34,55 @@ * \brief This file implements a statistical tracker. */ -namespace alvar { - +namespace alvar +{ /** - * \brief \e TrackerStat deduces the optical flow based on tracked features using Seppo Valli's statistical tracking. + * \brief \e TrackerStat deduces the optical flow based on tracked features + * using Seppo Valli's statistical tracking. 
*/ -class ALVAR_EXPORT TrackerStat : public Tracker { +class ALVAR_EXPORT TrackerStat : public Tracker +{ protected: - TrackerFeatures f; - HistogramSubpixel hist; + TrackerFeatures f; + HistogramSubpixel hist; + public: - /** \brief \e Track result x-translation in pixels */ - double xd; - /** \brief \e Track result y-translation in pixels */ - double yd; - /** \brief Constructor */ - TrackerStat(int binsize=8); - /** \brief Reset */ - void Reset(); - /** - * \brief Translation tracker (the simplest possible) - */ - double Track(IplImage *img); - virtual void Compensate(double *x, double *y); + /** \brief \e Track result x-translation in pixels */ + double xd; + /** \brief \e Track result y-translation in pixels */ + double yd; + /** \brief Constructor */ + TrackerStat(int binsize = 8); + /** \brief Reset */ + void Reset(); + /** + * \brief Translation tracker (the simplest possible) + */ + double Track(IplImage* img); + virtual void Compensate(double* x, double* y); }; /** - * \brief TrackerStatRot implements a slightly extended version of TrackerStat which can also detect sideways rotation. + * \brief TrackerStatRot implements a slightly extended version of TrackerStat + * which can also detect sideways rotation. */ -class ALVAR_EXPORT TrackerStatRot : public TrackerStat { - int x_res, y_res; - HistogramSubpixel hist_rot; +class ALVAR_EXPORT TrackerStatRot : public TrackerStat +{ + int x_res, y_res; + HistogramSubpixel hist_rot; + public: - /** \brief \e Track result rotation in degrees */ - double rotd; - /** \brief Constructor */ - TrackerStatRot(int binsize=8, int binsize_rot=3); - /** - * \brief Translation + rotation tracker - */ - double Track(IplImage *img); - virtual void Compensate(double *x, double *y); + /** \brief \e Track result rotation in degrees */ + double rotd; + /** \brief Constructor */ + TrackerStatRot(int binsize = 8, int binsize_rot = 3); + /** + * \brief Translation + rotation tracker + */ + double Track(IplImage* img); + virtual void Compensate(double* x, double* y); }; -} // namespace alvar +} // namespace alvar #endif - - diff --git a/ar_track_alvar/include/ar_track_alvar/TrifocalTensor.h b/ar_track_alvar/include/ar_track_alvar/TrifocalTensor.h index 5429440..5f16aad 100644 --- a/ar_track_alvar/include/ar_track_alvar/TrifocalTensor.h +++ b/ar_track_alvar/include/ar_track_alvar/TrifocalTensor.h @@ -34,118 +34,122 @@ * \brief This file implements a trifocal tensor. */ -namespace alvar { +namespace alvar +{ +/** + * \brief Trifocal tensor works for three images like a fundamental matrix + * works for two images. + * + * Given three camera poses P0, P1, P2 and a 3D point X, we can calculate a + * trifocal tensor T(P0, P1, P2). The tensor relates projections of X in P0, + * P1 and P2 in such a way that when any two projections are known the third + * projection can be calculated. + * + * This implementation of trifocal tensor assumes that the camera poses + * P0, P1 and P2 are known. When projections of X in P0 and P1 are known + * the projection in P2 can be computed using the tensor. + * + * The current implementation cannot be used to directly compute the tensor + * from point correspondencies alone. The implementation can be used for + * example for optimizing three camera poses when point correspondences are + * known in the three images by minimizing the trifocal 'projection error' + * computed by \e projectError -method. + * + * \code + * Pose P1, P2; // P0 is identity pose. 
+ * CvPoint2D64f proj0, proj1, proj2; // A 3D point projected with poses P0, P1 + * and P2. + * + * TrifocalTensor T(P1, P2); + * CvPoint2D64f test2; + * T.project(proj0, proj1, test2); + * assert(proj2.x == test2.x); + * assert(proj2.y == test2.y); + * assert(proj2.z == test2.z); + * \endcode + */ +class ALVAR_EXPORT TrifocalTensor +{ +private: + double T[3][3][3]; + double projectAxis(const CvPoint2D64f& p0, const CvPoint2D64f& p1, int l); + +public: + TrifocalTensor(); - /** - * \brief Trifocal tensor works for three images like a fundamental matrix - * works for two images. - * - * Given three camera poses P0, P1, P2 and a 3D point X, we can calculate a - * trifocal tensor T(P0, P1, P2). The tensor relates projections of X in P0, - * P1 and P2 in such a way that when any two projections are known the third - * projection can be calculated. + /** \brief Constructs a tensor from identity pose and two other known poses. + * See \e computeTensor. * - * This implementation of trifocal tensor assumes that the camera poses - * P0, P1 and P2 are known. When projections of X in P0 and P1 are known - * the projection in P2 can be computed using the tensor. + * \param P1 The second pose relative to the first pose. + * \param P2 The third pose relative to the first pose. + */ + TrifocalTensor(const Pose& P1, const Pose& P2); + + /** \brief Constructs a tensor from three known poses. + * See \e computeTensor. * - * The current implementation cannot be used to directly compute the tensor - * from point correspondencies alone. The implementation can be used for - * example for optimizing three camera poses when point correspondences are - * known in the three images by minimizing the trifocal 'projection error' - * computed by \e projectError -method. + * \param P0 The first camera pose. + * \param P1 The second pose relative to the first pose. + * \param P2 The third pose relative to the first pose. + */ + TrifocalTensor(const Pose& P0, const Pose& P1, const Pose& P2); + + ~TrifocalTensor(); + + /** \brief Initializes the tensor from identity pose and two other known + * poses. * - * \code - * Pose P1, P2; // P0 is identity pose. - * CvPoint2D64f proj0, proj1, proj2; // A 3D point projected with poses P0, P1 and P2. + * The first pose is identity and the two other poses are relative + * translations/rotation between the first and the second pose and + * between the first and the third pose. * - * TrifocalTensor T(P1, P2); - * CvPoint2D64f test2; - * T.project(proj0, proj1, test2); - * assert(proj2.x == test2.x); - * assert(proj2.y == test2.y); - * assert(proj2.z == test2.z); - * \endcode + * \param P1 The second pose relative to the first pose. + * \param P2 The third pose relative to the first pose. */ - class ALVAR_EXPORT TrifocalTensor { - private: - double T[3][3][3]; - double projectAxis(const CvPoint2D64f &p0, const CvPoint2D64f &p1, int l); - - public: - TrifocalTensor(); + void computeTensor(const Pose& P1, const Pose& P2); - /** \brief Constructs a tensor from identity pose and two other known poses. - * See \e computeTensor. - * - * \param P1 The second pose relative to the first pose. - * \param P2 The third pose relative to the first pose. - */ - TrifocalTensor(const Pose &P1, const Pose &P2); + /** \brief Initializes the tensor from three known poses. + * + * \param P0 The first camera pose. + * \param P1 The second pose relative to the first pose. + * \param P2 The third pose relative to the first pose. 
+   */
+  void computeTensor(const Pose& P0, const Pose& P1, const Pose& P2);
-    /** \brief Constructs a tensor from three known poses.
-     * See \e computeTensor.
-     *
-     * \param P0 The first camera pose.
-     * \param P1 The second pose relative to the first pose.
-     * \param P2 The third pose relative to the first pose.
-     */
-    TrifocalTensor(const Pose &P0, const Pose &P1, const Pose &P2);
+  /** \brief Computes the projection of a point in the third pose.
+   *
+   * When we have three images, each a projection of a scene from the three
+   * different camera poses (identity and the two poses that were used to
+   * initialize the tensor) and a 2D point correspondence between the first
+   * and the second images, a position in the third image is computed.
+   *
+   * \param p0 2D position in an image projected from the first pose.
+   * \param p1 2D position in an image projected from the second pose.
+   * \param p2 Computed 2D position in an image projected from the third pose.
+   */
+  void project(const CvPoint2D64f& p0, const CvPoint2D64f& p1,
+               CvPoint2D64f& p2);
-    ~TrifocalTensor();
-
-    /** \brief Initializes the tensor from identity pose and two other known
-     * poses.
-     *
-     * The first pose is identity and the two other poses are relative
-     * translations/rotation between the first and the second pose and
-     * between the first and the third pose.
-     *
-     * \param P1 The second pose relative to the first pose.
-     * \param P2 The third pose relative to the first pose.
-     */
-    void computeTensor(const Pose &P1, const Pose &P2);
-
-    /** \brief Initializes the tensor from three known poses.
-     *
-     * \param P0 The first camera pose.
-     * \param P1 The second pose relative to the first pose.
-     * \param P2 The third pose relative to the first pose.
-     */
-    void computeTensor(const Pose &P0, const Pose &P1, const Pose &P2);
-
-    /** \brief Computes the projection of a point in the third pose.
-     *
-     * When we have three images, each a projection of a scene from the three
-     * different camera poses (identity and the two poses that were used to
-     * initialize the tensor) and a 2D point correspondence between the first
-     * and the second images, a position in the third image is computed.
-     *
-     * \param p0 2D position in a image projected from the first pose.
-     * \param p1 2D position in a image projected from the second pose.
-     * \param p2 Computed 2D position in a image projected form the third pose.
-     */
-    void project(const CvPoint2D64f &p0, const CvPoint2D64f &p1, CvPoint2D64f &p2);
-
-    /** \brief Computes how much three points differ from the tensor.
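For readers of the tensor API above, a hedged point-transfer sketch (the poses are default-constructed placeholders; real ones would come from calibration or tracking). Note that the usage example embedded in the class comment asserts on `proj2.z`, which `CvPoint2D64f` does not have; the transfer yields a 2D point, as below.

```
#include "ar_track_alvar/TrifocalTensor.h"
#include <opencv2/core/core_c.h>
#include <cstdio>

int main()
{
  alvar::Pose P1, P2;  // relative poses; P0 is the identity per the docs
  alvar::TrifocalTensor T(P1, P2);

  CvPoint2D64f p0 = { 120.0, 80.0 };  // observation in view 0
  CvPoint2D64f p1 = { 118.5, 82.0 };  // observation in view 1
  CvPoint2D64f p2;
  T.project(p0, p1, p2);              // predicted position in view 2

  // Near zero when the three observations are consistent with the poses.
  double err = T.projectError(p0, p1, p2);
  std::printf("transfer: (%f, %f), error: %f\n", p2.x, p2.y, err);
  return 0;
}
```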
+ * + * When we have three images, each a projection of a scene from the three + * different camera poses (identity and the two poses that were used to + * initialize the tensor) and a 2D point correspondence between the first + * the second and the third images, we compute how well the three projections + * match with the trifocal tensor. + * + * When the third point lies exactly where the tensor projects the first + * two points, the returned error is zero. + * + * \param p0 2D position in a image projected from the first pose. + * \param p1 2D position in a image projected from the second pose. + * \param p2 2D position in a image projected form the third pose. + * \return Squared projection error. + */ + double projectError(const CvPoint2D64f& p0, const CvPoint2D64f& p1, + const CvPoint2D64f& p2); +}; -} // namespace alvar +} // namespace alvar -#endif // __TRIFOCAL_TENSOR__ +#endif // __TRIFOCAL_TENSOR__ diff --git a/ar_track_alvar/include/ar_track_alvar/Uncopyable.h b/ar_track_alvar/include/ar_track_alvar/Uncopyable.h index ed708c0..2649df7 100644 --- a/ar_track_alvar/include/ar_track_alvar/Uncopyable.h +++ b/ar_track_alvar/include/ar_track_alvar/Uncopyable.h @@ -32,8 +32,8 @@ #include "Alvar.h" -namespace alvar { - +namespace alvar +{ /** * \brief Uncopyable for preventing object copies. * @@ -43,32 +43,32 @@ namespace alvar { class ALVAR_EXPORT Uncopyable { protected: - /** - * \brief Constructor. - */ - Uncopyable() - { - } + /** + * \brief Constructor. + */ + Uncopyable() + { + } - /** - * \brief Destructor. - */ - ~Uncopyable() - { - } + /** + * \brief Destructor. + */ + ~Uncopyable() + { + } private: - /** - * \brief Copy constructor. - */ - Uncopyable(const Uncopyable &uncopyable); + /** + * \brief Copy constructor. + */ + Uncopyable(const Uncopyable& uncopyable); - /** - * \brief Assignment operator. - */ - Uncopyable &operator=(const Uncopyable &uncopyable); + /** + * \brief Assignment operator. + */ + Uncopyable& operator=(const Uncopyable& uncopyable); }; -} // namespace alvar +} // namespace alvar #endif diff --git a/ar_track_alvar/include/ar_track_alvar/UnscentedKalman.h b/ar_track_alvar/include/ar_track_alvar/UnscentedKalman.h index 9e953ff..010032c 100644 --- a/ar_track_alvar/include/ar_track_alvar/UnscentedKalman.h +++ b/ar_track_alvar/include/ar_track_alvar/UnscentedKalman.h @@ -33,257 +33,267 @@ * \brief This file implements an unscented Kalman filter. */ -namespace alvar { +namespace alvar +{ +class UnscentedProcess; +class UnscentedObservation; - class UnscentedProcess; - class UnscentedObservation; +/** + * \brief Implementation of unscented kalman filter (UKF) for filtering + * non-linear processes. + * + * See http://www.cs.unc.edu/~welch/kalman/media/pdf/Julier1997_SPIE_KF.pdf + * for more details about UKF. + * + * The UKF estimates a process state (represented by a vector) using + * observations of the process. Observations are some derivate of the process + * state as usually the process state cannot be directly observed. + * + * \e UnscentedProcess models the process by predicting the next filter state + * based on the current filter state. + * + * \e UnscentedObservation models the observation by predicting observation + * results based on the current filter state. + * + * UnscentedKalman holds the estimated process state vector and its covariance + * matrix. The new process state can be estimated using \e predict and \e update + * methods. + * + * The current implementation does not separate process noise elements from + * the process state vector. 
It is therefore the responsibility of the user + * to include noise terms into process state and state covariance. + * + * \code + * class MyUnscentedProcess : public UnscentedProcess { + * void f(CvMat *state) { // compute new state } + * CvMat *getProcessNoise() { return _noise; } + * } myProcess; + * + * class MyUnscentedObservation : public UnscentedObservation { + * void h(CvMat *z, cvMat *state) { // compute measurement vector z from + * state } CvMat *getObservation() { return _obs; } CvMat *getObservationNoise() + * { return _noise; } } myObservation; + * + * int state_n = NUMBER_OF_ELEMENTS_IN_PROCESS_STATE_VECTOR; + * int obs_n = NUMBER_OF_ELEMENTS_IN_PROCESS_OBSERVATION_VECTOR; + * int state_k = NUMBER_OF_PROCESS_NOISE_ELEMENTS; //TODO: Not supported at + * the moment. + * + * UnscentedKalman ukf(state_n, obs_n, state_k); + * initializeState(ukf.getState(), ukf.getStateCovariance()); + * ukf.initialize(); + * + * while (1) { + * ukf.predict(&myProcess); + * // measure new observation. + * ukf.update(&myObservation); + * CvMat *state = ukf.getState(); + * // unpack state information from the state vector and do something with + * it. + * } + * + * \endcode + */ +class ALVAR_EXPORT UnscentedKalman +{ +private: + int state_n; + int state_k; + int obs_n; + int sigma_n; + bool sigmasUpdated; + double lambda, lambda2; + + CvMat* state; + CvMat* stateCovariance; + CvMat* sqrtStateCovariance; + CvMat* stateD; + CvMat* stateU; + CvMat* stateV; + CvMat* stateTmp; + CvMat* stateDiff; - /** - * \brief Implementation of unscented kalman filter (UKF) for filtering non-linear - * processes. + CvMat* predObs; + CvMat* predObsCovariance; + CvMat* invPredObsCovariance; + CvMat* predObsDiff; + + CvMat* statePredObsCrossCorrelation; + CvMat* kalmanGain; + CvMat* kalmanTmp; + + CvMat** sigma_state; + CvMat** sigma_predObs; + + // possess state mean and co-variance (as a list of sigma points). + // generate sigma points from state mean vector and co-variance matrix. + // compute state mean vector and co-variance matrix from sigma points. + + // predict: + // - map sigma points thru process model f. + + // update: + // - map sigma points thru h. + // - from current sigma points and sigma observations: + // - compute state estimate x and co-variance P. + // - compute predicted observation z and innocation co-variance Z + // - compute cross correlation XZ + // - compute new state mean and co-variance. + // - generate new sigma points. +public: + /** \brief Initializes Unscented Kalman filter. * - * See http://www.cs.unc.edu/~welch/kalman/media/pdf/Julier1997_SPIE_KF.pdf - * for more details about UKF. + * Initializes Unscented Kalman filter. The state vector returned by \e + * getState and state covariance matrix returned by \e getStateCovariance + * should be initialized before using the filter. * - * The UKF estimates a process state (represented by a vector) using observations - * of the process. Observations are some derivate of the process state as usually - * the process state cannot be directly observed. + * Separate state noise vector is currently unsupported. The user should + * include noise terms in the state vector directly. Set the noise mean into + * state vector and noise variance into state covariance matrix. * - * \e UnscentedProcess models the process by predicting the next filter state - * based on the current filter state. + * \param state_n The number of elements in process state vector. + * \param obs_n The number of elements in observation vector. 
+ * \param state_k The number of noise elements used in the process model. + * TODO: This is currently unsupported. + * \param alpha Spread of sigma points. + * \param beta Prior knowlegde about the distribution (2 for Gaussian). + */ + UnscentedKalman(int state_n, int obs_n, int state_k = 0, double alpha = 0.001, + double beta = 2.0); + ~UnscentedKalman(); + + /** \brief Returns the process state vector. * - * \e UnscentedObservation models the observation by predicting observation results - * based on the current filter state. + * The returned state vector contains the current state of the process. + * The returned vector may be modified if the current process state is + * known, for example in initialization phase. If the vector is modified, + * \e initialize method must be called before calling either predict or + * update methods. * - * UnscentedKalman holds the estimated process state vector and its covariance - * matrix. The new process state can be estimated using \e predict and \e update - * methods. + * \return A vector of state_n elements. + */ + CvMat* getState() + { + return state; + } + + /** \brief Returns the process state covariance matrix. * - * The current implementation does not separate process noise elements from - * the process state vector. It is therefore the responsibility of the user - * to include noise terms into process state and state covariance. + * The returned matrix contains the current state covariance. The matrix + * may be modified if the covariance is known, for example in initialization + * phase. If the matrix is modified, \e initialize method must be called + * before calling either predict of update methods. * - * \code - * class MyUnscentedProcess : public UnscentedProcess { - * void f(CvMat *state) { // compute new state } - * CvMat *getProcessNoise() { return _noise; } - * } myProcess; + * \return state_n by state_n covariance matrix. + */ + CvMat* getStateCovariance() + { + return stateCovariance; + } + + /** \brief (Re-)initialize UKF internal state. * - * class MyUnscentedObservation : public UnscentedObservation { - * void h(CvMat *z, cvMat *state) { // compute measurement vector z from state } - * CvMat *getObservation() { return _obs; } - * CvMat *getObservationNoise() { return _noise; } - * } myObservation; + * Must be called before predict/update when ever state or state co-variance + * are changed. + */ + void initialize(); + + /** \brief Updated the state by predicting. * - * int state_n = NUMBER_OF_ELEMENTS_IN_PROCESS_STATE_VECTOR; - * int obs_n = NUMBER_OF_ELEMENTS_IN_PROCESS_OBSERVATION_VECTOR; - * int state_k = NUMBER_OF_PROCESS_NOISE_ELEMENTS; //TODO: Not supported at the moment. + * Updates the process state by predicting new state from the current state. + * Normally each predict call is followed with a call to update method. * - * UnscentedKalman ukf(state_n, obs_n, state_k); - * initializeState(ukf.getState(), ukf.getStateCovariance()); - * ukf.initialize(); + * \param process_model The model implementation that is used to predict the + * next state. + */ + void predict(UnscentedProcess* process_model); + + /** \brief Updates the state by an observation. * - * while (1) { - * ukf.predict(&myProcess); - * // measure new observation. - * ukf.update(&myObservation); - * CvMat *state = ukf.getState(); - * // unpack state information from the state vector and do something with it. - * } + * Updates the process state by a measurement that indirectly observed the + * correct process state. 
The observation implementation needs to hold the + * current measurement data and implement a transformation from process state + * into measurement (the \e UnscentedObservation::h method). * - * \endcode + * \param observation The observation implementation the is used to update + * the current state. */ - class ALVAR_EXPORT UnscentedKalman { - private: - int state_n; - int state_k; - int obs_n; - int sigma_n; - bool sigmasUpdated; - double lambda, lambda2; + void update(UnscentedObservation* observation); +}; - CvMat *state; - CvMat *stateCovariance; - CvMat *sqrtStateCovariance; - CvMat *stateD; - CvMat *stateU; - CvMat *stateV; - CvMat *stateTmp; - CvMat *stateDiff; - - CvMat *predObs; - CvMat *predObsCovariance; - CvMat *invPredObsCovariance; - CvMat *predObsDiff; - - CvMat *statePredObsCrossCorrelation; - CvMat *kalmanGain; - CvMat *kalmanTmp; - - CvMat **sigma_state; - CvMat **sigma_predObs; - - // possess state mean and co-variance (as a list of sigma points). - // generate sigma points from state mean vector and co-variance matrix. - // compute state mean vector and co-variance matrix from sigma points. - - // predict: - // - map sigma points thru process model f. - - // update: - // - map sigma points thru h. - // - from current sigma points and sigma observations: - // - compute state estimate x and co-variance P. - // - compute predicted observation z and innocation co-variance Z - // - compute cross correlation XZ - // - compute new state mean and co-variance. - // - generate new sigma points. - public: - - /** \brief Initializes Unscented Kalman filter. - * - * Initializes Unscented Kalman filter. The state vector returned by \e getState - * and state covariance matrix returned by \e getStateCovariance should be - * initialized before using the filter. - * - * Separate state noise vector is currently unsupported. The user should include - * noise terms in the state vector directly. Set the noise mean into state vector - * and noise variance into state covariance matrix. - * - * \param state_n The number of elements in process state vector. - * \param obs_n The number of elements in observation vector. - * \param state_k The number of noise elements used in the process model. - * TODO: This is currently unsupported. - * \param alpha Spread of sigma points. - * \param beta Prior knowlegde about the distribution (2 for Gaussian). - */ - UnscentedKalman(int state_n, int obs_n, int state_k = 0, double alpha = 0.001, double beta = 2.0); - ~UnscentedKalman(); - - /** \brief Returns the process state vector. - * - * The returned state vector contains the current state of the process. - * The returned vector may be modified if the current process state is - * known, for example in initialization phase. If the vector is modified, - * \e initialize method must be called before calling either predict or - * update methods. - * - * \return A vector of state_n elements. - */ - CvMat *getState() { return state; } - - /** \brief Returns the process state covariance matrix. - * - * The returned matrix contains the current state covariance. The matrix - * may be modified if the covariance is known, for example in initialization - * phase. If the matrix is modified, \e initialize method must be called - * before calling either predict of update methods. - * - * \return state_n by state_n covariance matrix. - */ - CvMat *getStateCovariance() { return stateCovariance; } - - /** \brief (Re-)initialize UKF internal state. 
- * - * Must be called before predict/update when ever state or state co-variance - * are changed. - */ - void initialize(); - - /** \brief Updated the state by predicting. - * - * Updates the process state by predicting new state from the current state. - * Normally each predict call is followed with a call to update method. - * - * \param process_model The model implementation that is used to predict the - * next state. - */ - void predict(UnscentedProcess *process_model); - - /** \brief Updates the state by an observation. - * - * Updates the process state by a measurement that indirectly observed the - * correct process state. The observation implementation needs to hold the - * current measurement data and implement a transformation from process state - * into measurement (the \e UnscentedObservation::h method). - * - * \param observation The observation implementation the is used to update - * the current state. - */ - void update(UnscentedObservation *observation); - }; - - /** - * \brief Process model for an unscented kalman filter. +/** + * \brief Process model for an unscented kalman filter. + * + * Implementing class needs to allocate a noise matrix of correct size. + */ +class ALVAR_EXPORT UnscentedProcess +{ +public: + /** \brief process model: state+1 = f(state) + * + * Model the process by computing an estimate how the process changes + * when one timestep is taken. * - * Implementing class needs to allocate a noise matrix of correct size. + * \param state state_n size vector; The current state in input and the next + * state estimate in output. */ - class ALVAR_EXPORT UnscentedProcess { - public: - /** \brief process model: state+1 = f(state) - * - * Model the process by computing an estimate how the process changes - * when one timestep is taken. - * - * \param state state_n size vector; The current state in input and the next - * state estimate in output. - */ - virtual void f(CvMat *state) = 0; + virtual void f(CvMat* state) = 0; - /** \brief Returns the process noise covariance. - * - * The returned matrix will be added to the current state covariance matrix, - * increasing the uncertainty of the current state. The matrix should reflect - * all unknown factors of the process which are not taken into account by the - * state estimation method \e f. - * - * \return state_n by state_n size matrix; or NULL for no additional noise. - */ - virtual CvMat *getProcessNoise() = 0; - }; + /** \brief Returns the process noise covariance. + * + * The returned matrix will be added to the current state covariance matrix, + * increasing the uncertainty of the current state. The matrix should reflect + * all unknown factors of the process which are not taken into account by the + * state estimation method \e f. + * + * \return state_n by state_n size matrix; or NULL for no additional noise. + */ + virtual CvMat* getProcessNoise() = 0; +}; - /** - * \brief Observation model for an unscented kalman filter. +/** + * \brief Observation model for an unscented kalman filter. + * + * The implementation needs to allocate correct size measurement vector and + * noise matrix and to implement a transformation from process state into a + * measurement. + */ +class ALVAR_EXPORT UnscentedObservation +{ +public: + /** \brief observation model: z = h(state) * - * The implementation needs to allocate correct size measurement vector and - * noise matrix and to implement a transformation from process state into a - * measurement. 
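To make the process/observation contract concrete, here is a hedged sketch, not taken from this patch, of the smallest useful filter: a two-state constant-velocity process observed through a 1-D position measurement, wired up the way the UnscentedKalman class comment prescribes. The time step and noise magnitudes are invented, and the matrices are deliberately never released.

```
#include "ar_track_alvar/UnscentedKalman.h"
#include <opencv2/core/core_c.h>

// Process model: state = [position, velocity], position += velocity * dt.
class CVProcess : public alvar::UnscentedProcess
{
public:
  CvMat* noise;
  CVProcess()
  {
    noise = cvCreateMat(2, 2, CV_64F);
    cvSetIdentity(noise, cvRealScalar(1e-4));  // assumed process noise
  }
  void f(CvMat* state)
  {
    cvmSet(state, 0, 0, cvmGet(state, 0, 0) + 0.1 * cvmGet(state, 1, 0));
  }
  CvMat* getProcessNoise()
  {
    return noise;
  }
};

// Observation model: we measure position only.
class PositionObservation : public alvar::UnscentedObservation
{
public:
  CvMat* z;  // latest measurement, filled in by the caller
  CvMat* noise;
  PositionObservation()
  {
    z = cvCreateMat(1, 1, CV_64F);
    noise = cvCreateMat(1, 1, CV_64F);
    cvmSet(noise, 0, 0, 1e-2);  // assumed measurement noise
  }
  void h(CvMat* zhat, CvMat* state)
  {
    cvmSet(zhat, 0, 0, cvmGet(state, 0, 0));
  }
  CvMat* getObservation()
  {
    return z;
  }
  CvMat* getObservationNoise()
  {
    return noise;
  }
};

// Initialize state and covariance once, then alternate predict/update.
void run_filter(double measurement)
{
  static alvar::UnscentedKalman ukf(2, 1);
  static CVProcess process;
  static PositionObservation obs;
  static bool first = true;
  if (first)
  {
    cvZero(ukf.getState());
    cvSetIdentity(ukf.getStateCovariance(), cvRealScalar(1.0));
    ukf.initialize();
    first = false;
  }
  ukf.predict(&process);
  cvmSet(obs.z, 0, 0, measurement);
  ukf.update(&obs);
}
```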
+ * Computes an estimated measurement vector from the current state estimate. + * + * \param z obs_n size vector; The estimated measurement. + * \param state state_n size vector; The current state. */ - class ALVAR_EXPORT UnscentedObservation { - public: - /** \brief observation model: z = h(state) - * - * Computes an estimated measurement vector from the current state estimate. - * - * \param z obs_n size vector; The estimated measurement. - * \param state state_n size vector; The current state. - */ - virtual void h(CvMat *z, CvMat *state) = 0; + virtual void h(CvMat* z, CvMat* state) = 0; - /** \brief Returns the current measurement vector. - * - * The returned vector should contain the latest measurement values. - * In the UKF update phase the process state will be modified in such a - * way to make the difference between estimated measurement (from method \e h) - * and the returned real measurement smaller. - * - * \return obs_n size vector containing the current measured values. - */ - virtual CvMat *getObservation() = 0; + /** \brief Returns the current measurement vector. + * + * The returned vector should contain the latest measurement values. + * In the UKF update phase the process state will be modified in such a + * way to make the difference between estimated measurement (from method \e h) + * and the returned real measurement smaller. + * + * \return obs_n size vector containing the current measured values. + */ + virtual CvMat* getObservation() = 0; - /** \brief Returns the observation noise covariance matrix. - * - * The returned matrix will be added to the current observation covariance - * matrix increasing the uncertainty of measurements. The matrix should - * reflect the amount of noise in the measurement vector returned by \e - * getObservation method. - * - * \return obs_n by obs_b matrix containing observation noise covariance; or - * NULL for no additional noise. - */ - virtual CvMat *getObservationNoise() = 0; - }; + /** \brief Returns the observation noise covariance matrix. + * + * The returned matrix will be added to the current observation covariance + * matrix increasing the uncertainty of measurements. The matrix should + * reflect the amount of noise in the measurement vector returned by \e + * getObservation method. + * + * \return obs_n by obs_b matrix containing observation noise covariance; or + * NULL for no additional noise. + */ + virtual CvMat* getObservationNoise() = 0; +}; -} // namespace alvar +} // namespace alvar -#endif // __UNSCENTED_KALMAN__ +#endif // __UNSCENTED_KALMAN__ diff --git a/ar_track_alvar/include/ar_track_alvar/Util.h b/ar_track_alvar/include/ar_track_alvar/Util.h index 6438724..358020d 100644 --- a/ar_track_alvar/include/ar_track_alvar/Util.h +++ b/ar_track_alvar/include/ar_track_alvar/Util.h @@ -37,267 +37,284 @@ #include #include #include -#include -#include -#include //for abs +#include +#include //for abs #include -#include //Compatibility with OpenCV 3.x - -namespace alvar { +namespace alvar +{ const double PI = 3.14159265; /** -* \brief Returns the sign of a number. -*/ -template inline -int ALVAR_EXPORT Sign(const C& v) + * \brief Returns the sign of a number. + */ +template +inline int ALVAR_EXPORT Sign(const C& v) { - return (v<0?-1:1); + return (v < 0 ? -1 : 1); } /** -* \brief Converts an angle from radians to degrees. -*/ -template inline -double ALVAR_EXPORT Rad2Deg(const C& v) + * \brief Converts an angle from radians to degrees. 
+ */ +template +inline double ALVAR_EXPORT Rad2Deg(const C& v) { - return v*(180/PI); + return v * (180 / PI); } /** -* \brief Converts an angle from degrees to radians. -*/ -template inline -double ALVAR_EXPORT Deg2Rad(const C& v) + * \brief Converts an angle from degrees to radians. + */ +template +inline double ALVAR_EXPORT Deg2Rad(const C& v) { - return v*(PI/180); + return v * (PI / 180); } /** - * \brief Simple \e Point class meant to be inherited from OpenCV point-classes. For example: Point p + * \brief Simple \e Point class meant to be inherited from OpenCV point-classes. + * For example: Point p */ -template +template struct ALVAR_EXPORT Point : public C { - /** - * \brief Additional value can be related to the point. - */ - D val; - - Point(int vx=0, int vy=0) - { - C::x = vx; - C::y = vy; - } - Point(double vx, double vy) - { - C::x = vx; - C::y = vy; - } + /** + * \brief Additional value can be related to the point. + */ + D val; + + Point(int vx = 0, int vy = 0) + { + C::x = vx; + C::y = vy; + } + Point(double vx, double vy) + { + C::x = vx; + C::y = vy; + } }; -/** - * \brief The default integer point type. -*/ -typedef ALVAR_EXPORT Point PointInt; +/** + * \brief The default integer point type. + */ +typedef ALVAR_EXPORT Point PointInt; /** - * \brief The default double point type. -*/ -typedef ALVAR_EXPORT Point PointDouble; - -/** \brief Returns the squared distance of two points. - * \param p1 First point. - * \param p2 Second point. - * \return Squared distance. -*/ -template -double PointSquaredDistance(PointType p1, PointType p2) { - return ((p1.x-p2.x)*(p1.x-p2.x)) + - ((p1.y-p2.y)*(p1.y-p2.y)); -} + * \brief The default double point type. + */ +typedef ALVAR_EXPORT Point PointDouble; +/** \brief Returns the squared distance of two points. + * \param p1 First point. + * \param p2 Second point. + * \return Squared distance. + */ +template +double PointSquaredDistance(PointType p1, PointType p2) +{ + return ((p1.x - p2.x) * (p1.x - p2.x)) + ((p1.y - p2.y) * (p1.y - p2.y)); +} -//ttesis start +// ttesis start +/** + * \brief Computes dot product AB.BC + * \param A,B and C points defining lines (line segments) AB and BC + */ +int ALVAR_EXPORT dot(const cv::Point& A, const cv::Point& B, + const cv::Point& C); -/** - * \brief Computes dot product AB.BC - * \param A,B and C points defining lines (line segments) AB and BC +/** + * \brief Computes the cross product AB x AC + * \param A,B and C points defining lines (line segments) AB and AC + * \param */ -int ALVAR_EXPORT dot(CvPoint *A, CvPoint *B, CvPoint *C); +int ALVAR_EXPORT cross(const cv::Point& A, const cv::Point& B, + const cv::Point& C); -/** - * \brief Computes the cross product AB x AC - * \param A,B and C points defining lines (line segments) AB and AC - * \param +/** + * \brief Compute the distance from A to B + * \param A and B points */ -int ALVAR_EXPORT cross(CvPoint *A,CvPoint *B, CvPoint *C); +double ALVAR_EXPORT distance(const cv::Point& A, const cv::Point& B); +/** + * \brief Computes the distance from point C to line (segment) AB. + * \param isSegment If isSegment is true, AB is a segment, not a line. 
+ * \param C point + * \param A abd B points defining line (segment) AB + */ +double ALVAR_EXPORT linePointDist(const cv::Point& A, const cv::Point& B, + const cv::Point& C, bool isSegment); -/** - * \brief Compute the distance from A to B - * \param A and B points +/** + * \brief Computes the angle between lines AB and CD + * \param isDirectionDependent If isDirectionDependent = 1, angle depends on + * the order of the points. Otherwise returns smaller angle. \param A start + * point of first line \param B end point of first line \param C start point + * of second line \param D end point of second line */ -double ALVAR_EXPORT distance(CvPoint *A,CvPoint *B); +double ALVAR_EXPORT angle(const cv::Point& A, const cv::Point& B, + const cv::Point& C, const cv::Point& D, + int isDirectionDependent); -/** - * \brief Computes the distance from point C to line (segment) AB. - * \param isSegment If isSegment is true, AB is a segment, not a line. - * \param C point - * \param A abd B points defining line (segment) AB +/** + * \brief Calculates minimum distance from Point C to Polygon whose points are + * in list PointList \brief Returns distance \param index index of point A in + * pointlist, where A is the starting point of the closest polygon segment + * \param isClosedPolygon is true if polygon is closed (segment of the first + * and last point is also checked) */ -double ALVAR_EXPORT linePointDist(CvPoint *A,CvPoint *B,CvPoint *C, bool isSegment); +double ALVAR_EXPORT polyLinePointDist(const std::vector& points, + const cv::Point& C, int* index, + int isClosedPolygon); +// ttesis end -/** - * \brief Computes the angle between lines AB and CD - * \param isDirectionDependent If isDirectionDependent = 1, angle depends on the order of the points. Otherwise returns smaller angle. - * \param A start point of first line - * \param B end point of first line - * \param C start point of second line - * \param D end point of second line +/** + * \brief Uses OpenCV routine to fit ellipse to a vector of points. + * \param points Vector of points on the ellipse edge. + * \param ellipse_box OpenCV struct for the fitted ellipse. */ -double ALVAR_EXPORT angle(CvPoint *A,CvPoint *B, CvPoint *C,CvPoint *D, int isDirectionDependent); +void ALVAR_EXPORT FitCVEllipse(const std::vector& points, + cv::RotatedRect& ellipse_box); +int ALVAR_EXPORT exp_filt2(std::vector& v, std::vector& ret, + bool clamp); -/** - * \brief Calculates minimum distance from Point C to Polygon whose points are in list PointList - * \brief Returns distance - * \param index index of point A in pointlist, where A is the starting point of the closest polygon segment - * \param isClosedPolygon is true if polygon is closed (segment of the first and last point is also checked) +/** + * \brief Calculates the difference between the consecutive vector elements. + * \param v Source elements. + * \param ret The difference vector. This is cleared and then resized. + * \return The number of elements. */ -double ALVAR_EXPORT polyLinePointDist(CvPoint *PointList, int nPnts,CvPoint *C, int *index, int isClosedPolygon); - -//ttesis end +template +inline int ALVAR_EXPORT diff(const std::vector& v, std::vector& ret) +{ + ret.clear(); + if (v.size() == 1) + { + ret.push_back(0); + } + else if (v.size() > 1) + { + ret.push_back(v.at(1) - v.at(0)); + for (size_t i = 1; i < v.size(); ++i) + { + ret.push_back(v.at(i) - v.at(i - 1)); + } + } + return int(ret.size()); +} +/** + * \brief Finds zero crossings of given vector elements (sequence). 
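A hedged sketch of the cv::Point-based helpers introduced above. The element type of `points` in `polyLinePointDist` is assumed to be `cv::Point`, since the template argument was stripped from this hunk.

```
#include <opencv2/core.hpp>
#include <vector>
#include "ar_track_alvar/Util.h"

int main()
{
  cv::Point A(0, 0), B(10, 0), C(5, 5), D(0, 10);

  // Perpendicular distance from C to segment AB: 5.
  double d = alvar::linePointDist(A, B, C, /*isSegment=*/true);

  // Smaller angle between lines AB and CD.
  double a = alvar::angle(A, B, C, D, /*isDirectionDependent=*/0);

  // Closest polygon segment to C; 'index' receives its starting vertex.
  std::vector<cv::Point> poly = { { 0, 0 }, { 10, 0 }, { 10, 10 }, { 0, 10 } };
  int index = 0;
  double pd = alvar::polyLinePointDist(poly, C, &index, /*isClosedPolygon=*/1);

  return (d > 0 && a >= 0 && pd >= 0) ? 0 : 1;
}
```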
+ * \param v Sequence of numbers from which the zero crossings are found.
+ * \param corners Resulting index list of zero crossings.
+ * \param offs
+ * \return Number of zero crossings found.
+ */
+int ALVAR_EXPORT find_zero_crossings(const std::vector<double>& v,
+                                     std::vector<int>& corners, int offs = 20);
 
-/**
- * \brief Output OpenCV matrix for debug purposes.
- */
-void ALVAR_EXPORT out_matrix(const CvMat *m, const char *name);
+/**
+ * \brief Output OpenCV matrix for debug purposes.
+ */
+void ALVAR_EXPORT out_matrix(const cv::Mat& m, const char* name);
 
-/**
- * \brief Limits a number to between two values.
- * \param val Input value.
- * \param min_val Minimum value for the result.
- * \param max_val Maximum value for the result.
- * \return Resulting value that is between \e min_val and \e max_val.
-*/
+/**
+ * \brief Limits a number to between two values.
+ * \param val Input value.
+ * \param min_val Minimum value for the result.
+ * \param max_val Maximum value for the result.
+ * \return Resulting value that is between \e min_val and \e max_val.
+ */
 double ALVAR_EXPORT Limit(double val, double min_val, double max_val);
 
-/**
+/**
  * \brief Class for N-dimensional index to be used e.g. with STL maps
  *
  * The idea is that if you want to sort N-dimensional pointers (e.g.
* when they are stored in STL maps) it is enough to have the 'operator<' - * working, instead of needing to calculate something like i=z*x_res*y_res + y*x_res + x; + * working, instead of needing to calculate something like i=z*x_res*y_res + + * y*x_res + x; */ -struct ALVAR_EXPORT Index { - /** \brief The indices for each dimension are stored in \e val (last being the most significant) */ - std::vector val; - /** \brief Constructor for 1D index */ - Index(int a); - /** \brief Constructor for 2D index */ - Index(int a, int b); - /** \brief Constructor for 3D index */ - Index(int a, int b, int c); - /** \brief Operator used for sorting the multidimensional indices (last dimension being the most significant) */ - bool operator<(const Index &index) const; +struct ALVAR_EXPORT Index +{ + /** \brief The indices for each dimension are stored in \e val (last being the + * most significant) */ + std::vector val; + /** \brief Constructor for 1D index */ + Index(int a); + /** \brief Constructor for 2D index */ + Index(int a, int b); + /** \brief Constructor for 3D index */ + Index(int a, int b, int c); + /** \brief Operator used for sorting the multidimensional indices (last + * dimension being the most significant) */ + bool operator<(const Index& index) const; }; -/** +/** * \brief Class for N-dimensional Histograms */ -class ALVAR_EXPORT Histogram { +class ALVAR_EXPORT Histogram +{ protected: - std::map bins; - std::vector dim_binsize; - int DimIndex(int dim, double val); - double DimVal(int dim, int index); + std::map bins; + std::vector dim_binsize; + int DimIndex(int dim, double val); + double DimVal(int dim, int index); + public: - /** \brief Add dimension with a binsize - */ - void AddDimension(int binsize); - /** \brief Clear the histogram */ - void Clear(); - /** \brief Increase the histogram for given dimensions */ - void Inc(double dim0, double dim1=0, double dim2=0); - /** \brief Get the maximum from the histogram - * This returns the value in the middle of the 'bin' instead of bin-number - */ - int GetMax(double *dim0, double *dim1=0, double *dim2=0); + /** \brief Add dimension with a binsize + */ + void AddDimension(int binsize); + /** \brief Clear the histogram */ + void Clear(); + /** \brief Increase the histogram for given dimensions */ + void Inc(double dim0, double dim1 = 0, double dim2 = 0); + /** \brief Get the maximum from the histogram + * This returns the value in the middle of the 'bin' instead of bin-number + */ + int GetMax(double* dim0, double* dim1 = 0, double* dim2 = 0); }; -/** - * \brief N-dimensional Histograms calculating also the subpixel average for max bin +/** + * \brief N-dimensional Histograms calculating also the subpixel average for max + * bin */ -class ALVAR_EXPORT HistogramSubpixel : public Histogram { +class ALVAR_EXPORT HistogramSubpixel : public Histogram +{ protected: - std::map acc_dim0; - std::map acc_dim1; - std::map acc_dim2; + std::map acc_dim0; + std::map acc_dim1; + std::map acc_dim2; + public: - /** \brief Clear the histogram */ - void Clear(); - /** \brief Increase the histogram for given dimensions */ - void Inc(double dim0, double dim1=0, double dim2=0); - /** \brief Get the maximum from the histogram - * This finds the maximum bin(s) and averages the original - * values contained there to achieve subpixel accuracy. 
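The histogram pair above is what the PSA and statistical trackers use to vote on a dominant motion. A hedged sketch with invented sample values:

```
#include "ar_track_alvar/Util.h"

void histogram_demo()
{
  alvar::HistogramSubpixel hist;
  hist.AddDimension(8);   // bin size for dim0 (e.g. dx)
  hist.AddDimension(8);   // bin size for dim1 (e.g. dy)
  hist.Inc(3.0, -12.0);
  hist.Inc(4.0, -11.0);
  hist.Inc(35.0, 2.0);    // an outlier vote
  double dx = 0, dy = 0;
  hist.GetMax(&dx, &dy);  // subpixel average of the strongest bin
}
```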
- */
-  int GetMax(double *dim0, double *dim1=0, double *dim2=0);
+  /** \brief Clear the histogram */
+  void Clear();
+  /** \brief Increase the histogram for given dimensions */
+  void Inc(double dim0, double dim1 = 0, double dim2 = 0);
+  /** \brief Get the maximum from the histogram
+   * This finds the maximum bin(s) and averages the original
+   * values contained there to achieve subpixel accuracy.
+   */
+  int GetMax(double* dim0, double* dim1 = 0, double* dim2 = 0);
 };
 
 #if (_MSC_VER >= 1400)
-  inline void STRCPY(char *to, rsize_t size, const char *src) {
-    strcpy_s(to,size,src);
-  }
+inline void STRCPY(char* to, rsize_t size, const char* src)
+{
+  strcpy_s(to, size, src);
+}
 #else
-  inline void STRCPY(char *to, size_t size, const char *src) {
-    strncpy(to,src,size-1);
-  }
+inline void STRCPY(char* to, size_t size, const char* src)
+{
+  strncpy(to, src, size - 1);
+}
 #endif
 #ifdef min
@@ -323,7 +340,7 @@ class ALVAR_EXPORT HistogramSubpixel : public Histogram {
 * return true;
 * }
 * \endcode
- * In your classes \e Serialize -method you can use the overloaded
+ * In your classes \e Serialize -method you can use the overloaded
 * \e Serialize method of the \e Serialization class to serialize
 * data or data arrays. In addition you can use \e SerializeClass
 * to serialize inner serializable classes.
@@ -346,108 +363,130 @@ class ALVAR_EXPORT HistogramSubpixel : public Histogram {
 * seri>>cam;
 * \endcode
 *
- * See the constructor \e Serialization::Serialization documentation for
+ * See the constructor \e Serialization::Serialization documentation for
 * further use examples.
 */
-class ALVAR_EXPORT Serialization {
+class ALVAR_EXPORT Serialization
+{
 protected:
-  bool input;
-  std::string filename;
-  //std::iostream *stream;
-  std::ios *stream;
-  void *formatter_handle;
-  bool Output();
-  bool Input();
-  bool Descend(const char *id);
-  bool Ascend();
+  bool input;
+  std::string filename;
+  // std::iostream *stream;
+  std::ios* stream;
+  void* formatter_handle;
+  bool Output();
+  bool Input();
+  bool Descend(const char* id);
+  bool Ascend();
+
 public:
-  /** \brief Constructor for serializing to/from specified filename
-   *
-   * \code
-   * Serialization sero("test1.xml");
-   * sero<<cam;
-   * \endcode
-   *
-   * \code
-   * Serialization seri("test1.xml");
-   * seri>>cam;
-   * \endcode
-   *
-   * Note that this is not identical to:
-   * \code
-   * ofstream ofs("test1.xml");
-   * Serialization sero(ofs);
-   * sero<<cam;
-   * \endcode
-   *
-   * \code
-   * ifstream ifs("test1.xml");
-   * Serialization seri(ifs);
-   * seri>>cam;
-   * \endcode
-   *
-   * There are differences with these approaches. When using the constructor
-   * with 'filename', we use the tinyxml Save and Load methods, while with
-   * iostream we use tinyxml operators for << and >> . The prior approach
-   * uses properly indented xml-files with XML declaration <?xml ...?>. In the
-   * latter approach the indentations and the XML declaration are left out.
-   * The XML declaration is left out because for some reason tinyxml
-   * doesn't parse it correctly when using operator>> .
-   */
-  Serialization(std::string _filename);
-  /** \brief Constructor for serializing any iostream (e.g. std::stringstream) */
-  Serialization(std::basic_iostream<char> &_stream);
-  /** \brief Constructor for serializing any istream (e.g. std::cin) */
-  Serialization(std::basic_istream<char> &_stream);
-  /** \brief Constructor for serializing any ostream (e.g. std::cout) */
-  Serialization(std::basic_ostream<char> &_stream);
-  /** \brief Destructor */
-  ~Serialization();
-  /** \brief Operator for outputting a serializable class into the defined filename or std::iostream */
-  template <class C>
-  Serialization& operator<<(C &serializable) {
-    input=false;
-    if (!SerializeClass(serializable) || !Output()) {
-      throw(AlvarException("Serialization failure"));
-    }
-    return *this;
-  }
-  /** \brief Operator for reading a serializable class from the defined filename or std::iostream */
-  template <class C>
-  Serialization& operator>>(C &serializable) {
-    input=true;
-    if (!Input() || !SerializeClass(serializable)) {
-      throw(AlvarException("Serialization failure"));
-    }
-    return *this;
-  }
-  /** \brief Method for serializing a serializable class. Used by operators << and >> .
-   *
-   * Note, in the future this should be usable also from your serializable class
-   * for adding nested serializable classes.
-   */
-  template <class C>
-  bool SerializeClass(C &serializable) {
-    std::string s = serializable.SerializeId();
-    if (!Descend(s.c_str()) || !serializable.Serialize(this) || !Ascend()) {
-      return false;
-    }
-    return true;
-  }
-  /** \brief Method for serializing 'int' data element. Used from your serializable class. */
-  bool Serialize(int &data, const std::string &name);
-  /** \brief Method for serializing 'int' data element. Used from your serializable class. */
-  bool Serialize(unsigned short &data, const std::string &name);
-  /** \brief Method for serializing 'int' data element. Used from your serializable class. */
-  bool Serialize(unsigned long &data, const std::string &name);
-  /** \brief Method for serializing 'double' data element. Used from your serializable class. */
-  bool Serialize(double &data, const std::string &name);
-  /** \brief Method for serializing 'std::string' data element. Used from your serializable class. */
-  bool Serialize(std::string &data, const std::string &name);
-  /** \brief Method for serializing 'CvMat' data element. Used from your serializable class. */
-  bool Serialize(CvMat &data, const std::string &name);
-  /** \brief Method for checking if we are inputting or outputting. Can be used from your serializable class. */
-  bool IsInput() { return input; }
+  /** \brief Constructor for serializing to/from specified filename
+   *
+   * \code
+   * Serialization sero("test1.xml");
+   * sero<<cam;
+   * \endcode
+   *
+   * \code
+   * Serialization seri("test1.xml");
+   * seri>>cam;
+   * \endcode
+   *
+   * Note that this is not identical to:
+   * \code
+   * ofstream ofs("test1.xml");
+   * Serialization sero(ofs);
+   * sero<<cam;
+   * \endcode
+   *
+   * \code
+   * ifstream ifs("test1.xml");
+   * Serialization seri(ifs);
+   * seri>>cam;
+   * \endcode
+   *
+   * There are differences with these approaches. When using the constructor
+   * with 'filename', we use the tinyxml Save and Load methods, while with
+   * iostream we use tinyxml operators for << and >> . The former approach
+   * produces properly indented xml-files with the XML declaration <?xml ...?>.
+   * In the latter approach the indentations and the XML declaration are left
+   * out. The XML declaration is left out because for some reason tinyxml
+   * doesn't parse it correctly when using operator>> .
+   */
+  Serialization(std::string _filename);
+  /** \brief Constructor for serializing any iostream (e.g. std::stringstream)
+   */
+  Serialization(std::basic_iostream<char>& _stream);
+  /** \brief Constructor for serializing any istream (e.g. std::cin) */
+  Serialization(std::basic_istream<char>& _stream);
+  /** \brief Constructor for serializing any ostream (e.g. std::cout) */
+  Serialization(std::basic_ostream<char>& _stream);
+  /** \brief Destructor */
+  ~Serialization();
+  /** \brief Operator for outputting a serializable class into the defined
+   * filename or std::iostream */
+  template <class C>
+  Serialization& operator<<(C& serializable)
+  {
+    input = false;
+    if (!SerializeClass(serializable) || !Output())
+    {
+      throw(AlvarException("Serialization failure"));
+    }
+    return *this;
+  }
+  /** \brief Operator for reading a serializable class from the defined filename
+   * or std::iostream */
+  template <class C>
+  Serialization& operator>>(C& serializable)
+  {
+    input = true;
+    if (!Input() || !SerializeClass(serializable))
+    {
+      throw(AlvarException("Serialization failure"));
+    }
+    return *this;
+  }
+  /** \brief Method for serializing a serializable class. Used by operators <<
+   * and >> .
+   *
+   * Note, in the future this should be usable also from your serializable class
+   * for adding nested serializable classes.
+   */
+  template <class C>
+  bool SerializeClass(C& serializable)
+  {
+    std::string s = serializable.SerializeId();
+    if (!Descend(s.c_str()) || !serializable.Serialize(this) || !Ascend())
+    {
+      return false;
+    }
+    return true;
+  }
+  /** \brief Method for serializing 'int' data element. Used from your
+   * serializable class. */
+  bool Serialize(int& data, const std::string& name);
+  /** \brief Method for serializing 'unsigned short' data element. Used from
+   * your serializable class. */
+  bool Serialize(unsigned short& data, const std::string& name);
+  /** \brief Method for serializing 'unsigned long' data element. Used from
+   * your serializable class. */
+  bool Serialize(unsigned long& data, const std::string& name);
+  /** \brief Method for serializing 'double' data element. Used from your
+   * serializable class. */
+  bool Serialize(double& data, const std::string& name);
+  /** \brief Method for serializing 'std::string' data element. Used from your
+   * serializable class. */
+  bool Serialize(std::string& data, const std::string& name);
+  /** \brief Method for serializing 'cv::Mat' data element. Used from your
+   * serializable class. */
+  bool Serialize(cv::Mat& data, const std::string& name);
+  /** \brief Method for checking if we are inputting or outputting. Can be used
+   * from your serializable class.
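+   *
+   * A minimal usage sketch (illustrative only; \e MyClass, \e width and
+   * \e RecomputeDerived are placeholder names, not part of this header):
+   * \code
+   * bool MyClass::Serialize(Serialization *ser) {
+   *   if (!ser->Serialize(width, "width")) return false;  // reads or writes
+   *   if (ser->IsInput()) RecomputeDerived();             // only after reading
+   *   return true;
+   * }
+   * \endcode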
+   */
+  bool IsInput()
+  {
+    return input;
+  }
 };
 
-} // namespace alvar
+}  // namespace alvar
 
 #endif
diff --git a/ar_track_alvar/include/ar_track_alvar/filter/kinect_filtering.h b/ar_track_alvar/include/ar_track_alvar/filter/kinect_filtering.h
index 6d19ca1..ebbc13a 100644
--- a/ar_track_alvar/include/ar_track_alvar/filter/kinect_filtering.h
+++ b/ar_track_alvar/include/ar_track_alvar/filter/kinect_filtering.h
@@ -29,8 +29,8 @@
 */
 
/**
- * \file
- *
+ * \file
+ *
 * Library for depth based filtering
 *
 * \author Bhaskara Marthi
@@ -44,65 +44,61 @@
 
 #include
 #include
-#include
-#include
+#include
+#include "rclcpp/rclcpp.hpp"
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
-#include
+#include
 
 namespace ar_track_alvar
 {
-
 typedef pcl::PointXYZRGB ARPoint;
 typedef pcl::PointCloud<ARPoint> ARCloud;
 
 // Result of plane fit: inliers and the plane equation
 struct PlaneFitResult
 {
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
-  PlaneFitResult () : inliers(ARCloud::Ptr(new ARCloud)) {}
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+  PlaneFitResult() : inliers(ARCloud::Ptr(new ARCloud))
+  {
+  }
   ARCloud::Ptr inliers;
   pcl::ModelCoefficients coeffs;
 };
 
 // Select out a subset of a cloud corresponding to a set of pixel coordinates
-ARCloud::Ptr filterCloud (const ARCloud& cloud,
-                          const std::vector<cv::Point, Eigen::aligned_allocator<cv::Point> >& pixels);
+ARCloud::Ptr filterCloud(
+    const ARCloud& cloud,
+    const std::vector<cv::Point, Eigen::aligned_allocator<cv::Point> >& pixels);
 
 // Wrapper for PCL plane fitting
-PlaneFitResult fitPlane (ARCloud::ConstPtr cloud);
+PlaneFitResult fitPlane(ARCloud::ConstPtr cloud);
 
-// Given the coefficients of a plane, and two points p1 and p2, we produce a
+// Given the coefficients of a plane, and two points p1 and p2, we produce a
 // quaternion q that sends p2'-p1' to (1,0,0) and n to (0,0,1), where p1' and
-// p2' are the projections of p1 and p2 onto the plane and n is the normal.
+// p2' are the projections of p1 and p2 onto the plane and n is the normal.
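+// (Equivalently: q is the rotation that takes the right-handed orthonormal
+// frame x = normalize(p2'-p1'), z = n, y = cross(z, x) onto the camera axes.)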
// There's a sign ambiguity here, which is resolved by requiring that the // difference p4'-p3' ends up with a positive y coordinate -int -extractOrientation (const pcl::ModelCoefficients& coeffs, - const ARPoint& p1, const ARPoint& p2, - const ARPoint& p3, const ARPoint& p4, - geometry_msgs::Quaternion &retQ); +int extractOrientation(const pcl::ModelCoefficients& coeffs, const ARPoint& p1, + const ARPoint& p2, const ARPoint& p3, const ARPoint& p4, + geometry_msgs::msg::Quaternion& retQ); // Like extractOrientation except return value is a btMatrix3x3 -int -extractFrame (const pcl::ModelCoefficients& coeffs, - const ARPoint& p1, const ARPoint& p2, - const ARPoint& p3, const ARPoint& p4, - tf::Matrix3x3 &retmat); - +int extractFrame(const pcl::ModelCoefficients& coeffs, const ARPoint& p1, + const ARPoint& p2, const ARPoint& p3, const ARPoint& p4, + tf2::Matrix3x3& retmat); // Return the centroid (mean) of a point cloud -geometry_msgs::Point centroid (const ARCloud& points); -} // namespace +geometry_msgs::msg::Point centroid(const ARCloud& points); +} // namespace ar_track_alvar -#endif // include guard +#endif // include guard diff --git a/ar_track_alvar/include/ar_track_alvar/filter/medianFilter.h b/ar_track_alvar/include/ar_track_alvar/filter/medianFilter.h index 4e43ada..60b9b35 100644 --- a/ar_track_alvar/include/ar_track_alvar/filter/medianFilter.h +++ b/ar_track_alvar/include/ar_track_alvar/filter/medianFilter.h @@ -29,9 +29,9 @@ */ /** - * \file - * - * N-dimensional median filter for marker poses + * \file + * + * N-dimensional median filter for marker poses * * \author Scott Niekum */ @@ -43,22 +43,20 @@ namespace ar_track_alvar { - class MedianFilter { - public: +public: MedianFilter(int n); - void addPose(const alvar::Pose &new_pose); - void getMedian(alvar::Pose &ret_pose); + void addPose(const alvar::Pose& new_pose); + void getMedian(alvar::Pose& ret_pose); - private: - int median_n; - alvar::Pose *median_poses; +private: + int median_n; + alvar::Pose* median_poses; int median_ind; bool median_init; }; +} // namespace ar_track_alvar -} // namespace - -#endif // include guard +#endif // include guard diff --git a/ar_track_alvar/launch/pr2_bundle.launch b/ar_track_alvar/launch/pr2_bundle.launch deleted file mode 100755 index 69d49da..0000000 --- a/ar_track_alvar/launch/pr2_bundle.launch +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - - - diff --git a/ar_track_alvar/launch/pr2_bundle.py b/ar_track_alvar/launch/pr2_bundle.py new file mode 100644 index 0000000..e6ed689 --- /dev/null +++ b/ar_track_alvar/launch/pr2_bundle.py @@ -0,0 +1,33 @@ + + +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + +# command line arguments +marker_size="4.4" +max_new_marker_error="0.08" +max_track_error="0.0" +cam_image_topic="/kinect_head/depth_registered/points" +cam_info_topic="/kinect_head/rgb/camera_info" +output_frame="torso_lift_link" +med_filt_size="10" +truth_table_leg = os.path.join(get_package_share_directory('ar_track_alvar'),'bundles','truthTableLeg.xml') +table_bundle = os.path.join(get_package_share_directory('ar_track_alvar'),'bundles','table_8_9_10.xml') +arguments=[marker_size, max_new_marker_error, max_track_error, cam_image_topic, cam_info_topic, output_frame, med_filt_size, truth_table_leg, table_bundle] +print(arguments) + +def generate_launch_description(): + return LaunchDescription([ + Node( + package='ar_track_alvar', + executable='findMarkerBundles', + 
output='screen', + remappings=[ + ("camera_image", cam_image_topic), + ("camera_info",cam_info_topic) + ], + arguments=[marker_size, max_new_marker_error, max_track_error, cam_image_topic, cam_info_topic, output_frame, med_filt_size, truth_table_leg, table_bundle], + ), + ]) \ No newline at end of file diff --git a/ar_track_alvar/launch/pr2_bundle_no_kinect.launch b/ar_track_alvar/launch/pr2_bundle_no_kinect.launch deleted file mode 100755 index c7892dd..0000000 --- a/ar_track_alvar/launch/pr2_bundle_no_kinect.launch +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/ar_track_alvar/launch/pr2_bundle_no_kinect.py b/ar_track_alvar/launch/pr2_bundle_no_kinect.py new file mode 100644 index 0000000..47462c3 --- /dev/null +++ b/ar_track_alvar/launch/pr2_bundle_no_kinect.py @@ -0,0 +1,29 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + +# command line arguments +marker_size="4.4" +max_new_marker_error="0.08" +max_track_error="0.2" +cam_image_topic="/wide_stereo/left/image_color" +cam_info_topic="/wide_stereo/left/camera_info" +output_frame="/torso_lift_link" +truth_table_leg = os.path.join(get_package_share_directory('ar_track_alvar'),'bundles','truthTableLeg.xml') +table_bundle = os.path.join(get_package_share_directory('ar_track_alvar'),'bundles','table_8_9_10.xml') + +def generate_launch_description(): + return LaunchDescription([ + Node( + package='ar_track_alvar', + name='findMarkerBundlesNoKinect', + executable='findMarkerBundlesNoKinect', + output='screen', + remappings=[ + ("camera_image", cam_image_topic), + ("camera_info",cam_info_topic) + ], + arguments=[marker_size, max_new_marker_error, max_track_error, cam_image_topic, cam_info_topic, output_frame, truth_table_leg, table_bundle], + ), + ]) \ No newline at end of file diff --git a/ar_track_alvar/launch/pr2_bundle_no_kinect_testing.py b/ar_track_alvar/launch/pr2_bundle_no_kinect_testing.py new file mode 100644 index 0000000..d9f4bf4 --- /dev/null +++ b/ar_track_alvar/launch/pr2_bundle_no_kinect_testing.py @@ -0,0 +1,29 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + +# command line arguments +marker_size="4.4" +max_new_marker_error="0.08" +max_track_error="0.2" +cam_image_topic="/wide_stereo/left/image_color" +cam_info_topic="/wide_stereo/left/camera_info" +output_frame="/torso_lift_link" +truth_table_leg = os.path.join(get_package_share_directory('ar_track_alvar'),'bundles','truthTableLeg.xml') +table_bundle = os.path.join(get_package_share_directory('ar_track_alvar'),'bundles','table_8_9_10.xml') + +def generate_launch_description(): + return LaunchDescription([ + Node( + package='ar_track_alvar', + name='findMarkerBundlesNoKinect', + executable='findMarkerBundlesNoKinect', + output='screen', + remappings=[ + ("camera_image", cam_image_topic), + ("camera_info",cam_info_topic) + ], + arguments=[marker_size, max_new_marker_error, max_track_error, cam_image_topic, cam_info_topic, output_frame, truth_table_leg, table_bundle,"true"], + ), + ]) \ No newline at end of file diff --git a/ar_track_alvar/launch/pr2_indiv.launch b/ar_track_alvar/launch/pr2_indiv.launch deleted file mode 100755 index 27e4689..0000000 --- a/ar_track_alvar/launch/pr2_indiv.launch +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - - - - - - - - - - - - - - diff --git a/ar_track_alvar/launch/pr2_indiv.py 
b/ar_track_alvar/launch/pr2_indiv.py new file mode 100644 index 0000000..aea3bfc --- /dev/null +++ b/ar_track_alvar/launch/pr2_indiv.py @@ -0,0 +1,24 @@ + + +from launch import LaunchDescription +import launch_ros.actions +import os + +def generate_launch_description(): + return LaunchDescription([ + launch_ros.actions.Node( + package='ar_track_alvar', + executable='individualMarkers', + output='screen', + remappings=[ + ("camera_image", "/kinect_head/depth_registered_points"), + ("camera_info","/kinect_head/rgb/camera_info") + ], + parameters=[ + {"marker_size":4.4}, + {"max_new_marker_error": 0.08}, + {"max_track_error":0.2}, + {"output_frame":"torso_lift_link"} + ], + ) + ]) \ No newline at end of file diff --git a/ar_track_alvar/launch/pr2_indiv_no_kinect.launch b/ar_track_alvar/launch/pr2_indiv_no_kinect.launch deleted file mode 100755 index 4428714..0000000 --- a/ar_track_alvar/launch/pr2_indiv_no_kinect.launch +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/ar_track_alvar/launch/pr2_indiv_no_kinect.py b/ar_track_alvar/launch/pr2_indiv_no_kinect.py new file mode 100644 index 0000000..6b38558 --- /dev/null +++ b/ar_track_alvar/launch/pr2_indiv_no_kinect.py @@ -0,0 +1,23 @@ + + +from launch import LaunchDescription +import launch_ros.actions +import os + +def generate_launch_description(): + return LaunchDescription([ + launch_ros.actions.Node( + package='ar_track_alvar', + executable='individualMarkersNoKinect', + output='screen', + remappings=[ + ("camera_image", "wide_stereo/left/image_color"), + ("camera_info","wide_stereo/left/camera_info") + ], + parameters=[ + {"marker_size":4.4}, + {"max_new_marker_error":0.08}, + {"max_track_error":0.2}, + {"output_frame":"torso_lift_link"} + ],) + ]) \ No newline at end of file diff --git a/ar_track_alvar/launch/pr2_train.launch b/ar_track_alvar/launch/pr2_train.launch deleted file mode 100755 index b929260..0000000 --- a/ar_track_alvar/launch/pr2_train.launch +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/ar_track_alvar/launch/pr2_train.py b/ar_track_alvar/launch/pr2_train.py new file mode 100644 index 0000000..c135865 --- /dev/null +++ b/ar_track_alvar/launch/pr2_train.py @@ -0,0 +1,28 @@ + + +from launch import LaunchDescription +from launch_ros.actions import Node +import os + +# command line arguments +nof_markers="8" +marker_size="4.4" +max_new_marker_error="0.08" +max_track_error="0.2" +cam_image_topic="/wide_stereo/left/image_color" +cam_info_topic="/wide_stereo/left/camera_info" +output_frame="/torso_lift_link" + +def generate_launch_description(): + return LaunchDescription([ + Node( + package='ar_track_alvar', + executable='trainMarkerBundle', + output='screen', + remappings=[ + ("camera_image", cam_image_topic), + ("camera_info",cam_info_topic) + ], + arguments=[nof_markers, marker_size, max_new_marker_error, max_track_error, cam_image_topic, cam_info_topic, output_frame] + ), + ]) \ No newline at end of file diff --git a/ar_track_alvar/nodes/FindMarkerBundles.cpp b/ar_track_alvar/nodes/FindMarkerBundles.cpp index e7f83e0..97a3427 100644 --- a/ar_track_alvar/nodes/FindMarkerBundles.cpp +++ b/ar_track_alvar/nodes/FindMarkerBundles.cpp @@ -39,33 +39,37 @@ #include "ar_track_alvar/MultiMarkerInitializer.h" #include "ar_track_alvar/Shared.h" #include -#include -#include -#include -#include +#include +#include +#include "tf2_ros/buffer.h" +#include "tf2_ros/transform_listener.h" +#include -#include +#include #include #include #include #include -#include -#include -#include +#include 
+#include +#include "rclcpp/rclcpp.hpp" #include #include #include #include #include -#include +// #include #include #include -#include +// #include #include #include #include +#include +#include "tf2_ros/create_timer_ros.h" +#include #define MAIN_MARKER 1 @@ -82,763 +86,849 @@ using namespace alvar; using namespace std; using boost::make_shared; -Camera *cam; -cv_bridge::CvImagePtr cv_ptr_; -image_transport::Subscriber cam_sub_; -ros::Subscriber cloud_sub_; -ros::Publisher arMarkerPub_; -ros::Publisher rvizMarkerPub_; -ros::Publisher rvizMarkerPub2_; -ar_track_alvar_msgs::AlvarMarkers arPoseMarkers_; -tf::TransformListener *tf_listener; -tf::TransformBroadcaster *tf_broadcaster; -MarkerDetector marker_detector; -MultiMarkerBundle **multi_marker_bundles=NULL; - -Pose *bundlePoses; -int *master_id; -int *bundles_seen; -bool *master_visible; -std::vector *bundle_indices; -bool init = true; -ata::MedianFilter **med_filts; -int med_filt_size; - -double marker_size; -double max_new_marker_error; -double max_track_error; -std::string cam_image_topic; -std::string cam_info_topic; -std::string output_frame; -int n_bundles = 0; - -//Debugging utility function -void draw3dPoints(ARCloud::Ptr cloud, string frame, int color, int id, double rad) +class FindMarkerBundles : public rclcpp::Node { - visualization_msgs::Marker rvizMarker; - - rvizMarker.header.frame_id = frame; - rvizMarker.header.stamp = ros::Time::now(); - rvizMarker.id = id; - rvizMarker.ns = "3dpts"; - - rvizMarker.scale.x = rad; - rvizMarker.scale.y = rad; - rvizMarker.scale.z = rad; - - rvizMarker.type = visualization_msgs::Marker::SPHERE_LIST; - rvizMarker.action = visualization_msgs::Marker::ADD; - - if(color==1){ - rvizMarker.color.r = 0.0f; - rvizMarker.color.g = 1.0f; - rvizMarker.color.b = 1.0f; - rvizMarker.color.a = 1.0; - } - if(color==2){ - rvizMarker.color.r = 1.0f; - rvizMarker.color.g = 0.0f; - rvizMarker.color.b = 1.0f; - rvizMarker.color.a = 1.0; - } - if(color==3){ - rvizMarker.color.r = 1.0f; - rvizMarker.color.g = 1.0f; - rvizMarker.color.b = 0.0f; - rvizMarker.color.a = 1.0; - } - - gm::Point p; - for(int i=0; ipoints.size(); i++){ - p.x = cloud->points[i].x; - p.y = cloud->points[i].y; - p.z = cloud->points[i].z; - rvizMarker.points.push_back(p); - } - - rvizMarker.lifetime = ros::Duration (1.0); - rvizMarkerPub2_.publish (rvizMarker); -} + private: + Camera *cam; + cv_bridge::CvImagePtr cv_ptr_; + image_transport::Subscriber cam_sub_; + + // Publishers + rclcpp::Publisher::SharedPtr arMarkerPub_; + rclcpp::Publisher::SharedPtr rvizMarkerPub_; + rclcpp::Publisher::SharedPtr rvizMarkerPub2_; + + // Subscribers + rclcpp::Subscription::SharedPtr cloud_sub_; + rclcpp::Subscription::SharedPtr info_sub_; + + + tf2::TimePoint prev_stamp_; + tf2::TimePoint prev_stamp2_; + std::shared_ptr tf2_; + std::shared_ptr tf_listener_; + std::shared_ptr tf_broadcaster_; + + MarkerDetector marker_detector; + MultiMarkerBundle **multi_marker_bundles=NULL; + ar_track_alvar_msgs::msg::AlvarMarkers arPoseMarkers_; + + Pose *bundlePoses; + int *master_id; + int *bundles_seen; + bool *master_visible; + std::vector *bundle_indices; + bool init = true; + ata::MedianFilter **med_filts; + int med_filt_size; + + double marker_size; + double max_new_marker_error; + double max_track_error; + std::string cam_image_topic; + std::string cam_info_topic; + std::string output_frame; + int n_bundles = 0; + + public: + FindMarkerBundles(int argc, char* argv[]):Node("marker_detect") + { -void drawArrow(gm::Point start, tf::Matrix3x3 mat, string frame, int 
color, int id)
-{
-  visualization_msgs::Marker rvizMarker;
-
-  rvizMarker.header.frame_id = frame;
-  rvizMarker.header.stamp = ros::Time::now();
-  rvizMarker.id = id;
-  rvizMarker.ns = "arrow";
-
-  rvizMarker.scale.x = 0.01;
-  rvizMarker.scale.y = 0.01;
-  rvizMarker.scale.z = 0.1;
-
-  rvizMarker.type = visualization_msgs::Marker::ARROW;
-  rvizMarker.action = visualization_msgs::Marker::ADD;
-
-  for(int i=0; i<3; i++){
-    rvizMarker.points.clear();
-    rvizMarker.points.push_back(start);
-    gm::Point end;
-    end.x = start.x + mat[0][i];
-    end.y = start.y + mat[1][i];
-    end.z = start.z + mat[2][i];
-    rvizMarker.points.push_back(end);
-    rvizMarker.id += 10*i;
-    rvizMarker.lifetime = ros::Duration (1.0);
-
-    if(color==1){
-      rvizMarker.color.r = 1.0f;
-      rvizMarker.color.g = 0.0f;
-      rvizMarker.color.b = 0.0f;
-      rvizMarker.color.a = 1.0;
-    }
-    if(color==2){
-      rvizMarker.color.r = 0.0f;
-      rvizMarker.color.g = 1.0f;
-      rvizMarker.color.b = 0.0f;
-      rvizMarker.color.a = 1.0;
-    }
-    if(color==3){
-      rvizMarker.color.r = 0.0f;
-      rvizMarker.color.g = 0.0f;
-      rvizMarker.color.b = 1.0f;
-      rvizMarker.color.a = 1.0;
-    }
-    color += 1;
+      rclcpp::Clock::SharedPtr clock = this->get_clock();
+      tf2_ = std::make_shared<tf2_ros::Buffer>(clock);
+      tf_listener_ = std::make_shared<tf2_ros::TransformListener>(*tf2_);
+      tf_broadcaster_ = std::make_shared<tf2_ros::TransformBroadcaster>(*this);
+      prev_stamp_ = tf2::get_now();
+      prev_stamp2_ = tf2::get_now();
 
-    rvizMarkerPub2_.publish (rvizMarker);
-  }
-}
+      if(argc < 9)
+      {
+        std::cout << std::endl;
+        cout << "Not enough arguments provided." << endl;
+        cout << "Usage: ./findMarkerBundles <marker size in cm> <max new marker error> "
+                "<max track error> <cam image topic> <cam info topic> <output frame> "
+                "<median filt size> <list of bundle XML files...>" << endl;
+        std::cout << std::endl;
+        exit(0);
+      }
 
-// Infer the master tag corner positons from the other observed tags
-// Also does some of the bookkeeping for tracking that MultiMarker::_GetPose does
-int InferCorners(const ARCloud &cloud, MultiMarkerBundle &master, ARCloud &bund_corners){
-  bund_corners.clear();
-  bund_corners.resize(4);
-  for(int i=0; i<4; i++){
-    bund_corners[i].x = 0;
-    bund_corners[i].y = 0;
-    bund_corners[i].z = 0;
-  }
+      // Get params from command line
+      marker_size = atof(argv[1]);
+      max_new_marker_error = atof(argv[2]);
+      max_track_error = atof(argv[3]);
+      cam_image_topic = argv[4];
+      cam_info_topic = argv[5];
+      output_frame = argv[6];
+      med_filt_size = atoi(argv[7]);
+      int n_args_before_list = 8;
+
+      n_bundles = 0;
+      std::string argument;
+      // count the bundle files, stopping before the --ros-args argument
+      for (int j = n_args_before_list; j < argc; j++)
+      {
+        argument = argv[j];
+        if (argument.find("--ros-args") != std::string::npos)
+          break;
+        n_bundles++;
+      }
 
-  // Reset the marker_status to 1 for all markers in point_cloud for tracking purposes
-  for (size_t i=0; i<master.marker_status.size(); i++) {
-    if (master.marker_status[i] > 0) master.marker_status[i]=1;
-  }
+      marker_detector.SetMarkerSize(marker_size);
+      multi_marker_bundles = new MultiMarkerBundle*[n_bundles];
+      bundlePoses = new Pose[n_bundles];
+      master_id = new int[n_bundles];
+      bundle_indices = new std::vector<int>[n_bundles];
+      bundles_seen = new int[n_bundles];
+      master_visible = new bool[n_bundles];
+
+      // Set up camera, listeners, and broadcasters
+      cam = new Camera();
+      arMarkerPub_ = this->create_publisher<ar_track_alvar_msgs::msg::AlvarMarkers>("ar_pose_marker", 0);
+      rvizMarkerPub_ = this->create_publisher<visualization_msgs::msg::Marker>("visualization_marker", 0);
+      rvizMarkerPub2_ = this->create_publisher<visualization_msgs::msg::Marker>("ARmarker_points", 0);
+
+      //Create median filters
+      med_filts = new ata::MedianFilter*[n_bundles];
+      for(int i=0; i<n_bundles; i++)
+        med_filts[i] = new ata::MedianFilter(med_filt_size);
+
+      // Load the marker bundle XML files
+      for(int i=0; i<n_bundles; i++)
+      {
+        bundlePoses[i].Reset();
+        MultiMarker loadHelper;
+        if(loadHelper.Load(argv[i + n_args_before_list], FILE_FORMAT_XML))
+        {
+          vector<int> id_vector = loadHelper.getIndices();
+          multi_marker_bundles[i] = new MultiMarkerBundle(id_vector);
+          multi_marker_bundles[i]->Load(argv[i + n_args_before_list], FILE_FORMAT_XML);
+          master_id[i] = multi_marker_bundles[i]->getMasterId();
+          bundle_indices[i] = multi_marker_bundles[i]->getIndices();
+          calcAndSaveMasterCoords(*(multi_marker_bundles[i]));
+        }
+        else{
+          cout << "Cannot load file " << argv[i + n_args_before_list] << endl;
+          exit(0);
+        }
+      }
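+      // Example invocation (a sketch -- the values, topics and bundle files
+      // below mirror launch/pr2_bundle.py and are placeholders for your setup):
+      //
+      //   ros2 run ar_track_alvar findMarkerBundles 4.4 0.08 0.0 \
+      //     /kinect_head/depth_registered/points /kinect_head/rgb/camera_info \
+      //     torso_lift_link 10 truthTableLeg.xml table_8_9_10.xml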
- int n_est = 0; + //Give tf a chance to catch up before the camera callback starts asking for transforms + //TODO: come back to this, there's probably a better way to do this + rclcpp::Rate loop_rate(100); + loop_rate.sleep(); + + //Subscribe to topics and set up callbacks + RCLCPP_INFO(this->get_logger(),"Subscribing to image topic"); + - // For every detected marker - for (size_t i=0; isize(); i++) + + cloud_sub_ = this->create_subscription(cam_image_topic, 1, std::bind(&FindMarkerBundles::getPointCloudCallback, this, std::placeholders::_1)); + + RCLCPP_INFO(this->get_logger(),"Subscribing to info topic"); + info_sub_ = this->create_subscription(cam_info_topic, 1, std::bind(&FindMarkerBundles::InfoCallback, this, std::placeholders::_1)); + + } + + void InfoCallback (const sensor_msgs::msg::CameraInfo::SharedPtr cam_info) { - const Marker* marker = &((*marker_detector.markers)[i]); - int id = marker->GetId(); - int index = master.get_id_index(id); - int mast_id = master.master_id; - if (index < 0) continue; + RCLCPP_INFO(this->get_logger(),"this executed"); + if (!cam->getCamInfo_) + { + cam->SetCameraInfo(cam_info); + cam->getCamInfo_ = true; + info_sub_.reset(); + } + } - // But only if we have corresponding points in the pointcloud - if (master.marker_status[index] > 0 && marker->valid) { - n_est++; + //Debugging utility function + void draw3dPoints(ARCloud::Ptr cloud, string frame, int color, int id, double rad) + { + visualization_msgs::msg::Marker rvizMarker; - std::string marker_frame = "ar_marker_"; - std::stringstream mark_out; - mark_out << id; - std::string id_string = mark_out.str(); - marker_frame += id_string; + rvizMarker.header.frame_id = frame; + rvizMarker.header.stamp = this->get_clock()->now(); + rvizMarker.id = id; + rvizMarker.ns = "3dpts"; + + rvizMarker.scale.x = rad; + rvizMarker.scale.y = rad; + rvizMarker.scale.z = rad; + + rvizMarker.type = visualization_msgs::msg::Marker::SPHERE_LIST; + rvizMarker.action = visualization_msgs::msg::Marker::ADD; + + if(color==1){ + rvizMarker.color.r = 0.0f; + rvizMarker.color.g = 1.0f; + rvizMarker.color.b = 1.0f; + rvizMarker.color.a = 1.0; + } + if(color==2){ + rvizMarker.color.r = 1.0f; + rvizMarker.color.g = 0.0f; + rvizMarker.color.b = 1.0f; + rvizMarker.color.a = 1.0; + } + if(color==3){ + rvizMarker.color.r = 1.0f; + rvizMarker.color.g = 1.0f; + rvizMarker.color.b = 0.0f; + rvizMarker.color.a = 1.0; + } + + gm::msg::Point p; + for(int i=0; ipoints.size(); i++){ + p.x = cloud->points[i].x; + p.y = cloud->points[i].y; + p.z = cloud->points[i].z; + rvizMarker.points.push_back(p); + } + + rvizMarker.lifetime = rclcpp::Duration (1.0); + rvizMarkerPub2_->publish (rvizMarker); + } - //Grab the precomputed corner coords and correct for the weird Alvar coord system - for(int j = 0; j < 4; ++j) - { - tf::Vector3 corner_coord = master.rel_corners[index][j]; - gm::PointStamped p, output_p; - p.header.frame_id = marker_frame; - p.point.x = corner_coord.y()/100.0; - p.point.y = -corner_coord.x()/100.0; - p.point.z = corner_coord.z()/100.0; - - try{ - tf_listener->waitForTransform(cloud.header.frame_id, marker_frame, ros::Time(0), ros::Duration(0.1)); - tf_listener->transformPoint(cloud.header.frame_id, p, output_p); - } - catch (tf::TransformException ex){ - ROS_ERROR("ERROR InferCorners: %s",ex.what()); - return -1; - } - bund_corners[j].x += output_p.point.x; - bund_corners[j].y += output_p.point.y; - bund_corners[j].z += output_p.point.z; + void drawArrow(gm::msg::Point start, tf2::Matrix3x3 mat, string frame, int color, int id) + 
{ + visualization_msgs::msg::Marker rvizMarker; + + rvizMarker.header.frame_id = frame; + rvizMarker.header.stamp = this->get_clock()->now(); + rvizMarker.id = id; + rvizMarker.ns = "arrow"; + + rvizMarker.scale.x = 0.01; + rvizMarker.scale.y = 0.01; + rvizMarker.scale.z = 0.1; + + rvizMarker.type = visualization_msgs::msg::Marker::ARROW; + rvizMarker.action = visualization_msgs::msg::Marker::ADD; + + for(int i=0; i<3; i++){ + rvizMarker.points.clear(); + rvizMarker.points.push_back(start); + gm::msg::Point end; + end.x = start.x + mat[0][i]; + end.y = start.y + mat[1][i]; + end.z = start.z + mat[2][i]; + rvizMarker.points.push_back(end); + rvizMarker.id += 10*i; + rvizMarker.lifetime = rclcpp::Duration (1.0); + + if(color==1){ + rvizMarker.color.r = 1.0f; + rvizMarker.color.g = 0.0f; + rvizMarker.color.b = 0.0f; + rvizMarker.color.a = 1.0; + } + if(color==2){ + rvizMarker.color.r = 0.0f; + rvizMarker.color.g = 1.0f; + rvizMarker.color.b = 0.0f; + rvizMarker.color.a = 1.0; + } + if(color==3){ + rvizMarker.color.r = 0.0f; + rvizMarker.color.g = 0.0f; + rvizMarker.color.b = 1.0f; + rvizMarker.color.a = 1.0; } - master.marker_status[index] = 2; // Used for tracking + color += 1; + + rvizMarkerPub2_->publish (rvizMarker); } } - - //Divide to take the average of the summed estimates - if(n_est > 0){ + + + // Infer the master tag corner positons from the other observed tags + // Also does some of the bookkeeping for tracking that MultiMarker::_GetPose does + int InferCorners(const ARCloud &cloud, MultiMarkerBundle &master, ARCloud &bund_corners){ + bund_corners.clear(); + bund_corners.resize(4); for(int i=0; i<4; i++){ - bund_corners[i].x /= n_est; - bund_corners[i].y /= n_est; - bund_corners[i].z /= n_est; + bund_corners[i].x = 0; + bund_corners[i].y = 0; + bund_corners[i].z = 0; } - } - return 0; -} + // Reset the marker_status to 1 for all markers in point_cloud for tracking purposes + for (size_t i=0; i 0) master.marker_status[i]=1; + } + int n_est = 0; + + // For every detected marker + for (size_t i=0; isize(); i++) + { + const Marker* marker = &((*marker_detector.markers)[i]); + int id = marker->GetId(); + int index = master.get_id_index(id); + int mast_id = master.master_id; + if (index < 0) continue; + + // But only if we have corresponding points in the pointcloud + if (master.marker_status[index] > 0 && marker->valid) { + n_est++; + + std::string marker_frame = "ar_marker_"; + std::stringstream mark_out; + mark_out << id; + std::string id_string = mark_out.str(); + marker_frame += id_string; + + //Grab the precomputed corner coords and correct for the weird Alvar coord system + for(int j = 0; j < 4; ++j) + { + tf2::Vector3 corner_coord = master.rel_corners[index][j]; + gm::msg::PointStamped p, output_p; + p.header.frame_id = marker_frame; + p.point.x = corner_coord.y()/100.0; + p.point.y = -corner_coord.x()/100.0; + p.point.z = corner_coord.z()/100.0; + + try{ + geometry_msgs::msg::TransformStamped pt_transform; + tf2::TimePoint tf2_time = tf2_ros::fromMsg(rclcpp::Time(cloud.header.stamp)); + pt_transform = tf2_->lookupTransform(cloud.header.frame_id, marker_frame,tf2_time,tf2_time - prev_stamp2_); + + + // tf2_.canTransform(cloud.header.frame_id, marker_frame, rclcpp::Time(0), rclcpp::Duration(0.1)); + // geometry_msgs::msg::TransformStamped pt_transform; + // pt_transform = tf2_.lookupTransform(cloud.header.frame_id, marker_frame, rclcpp::Time(0), rclcpp::Duration(0.1)); + tf2::doTransform(p, output_p,pt_transform); + } + catch (tf2::TransformException ex){ + 
RCLCPP_ERROR(rclcpp::get_logger("ArTrackAlvar"), "ERROR InferCorners: %s",ex.what()); + return -1; + } + prev_stamp2_ = tf2_ros::fromMsg(rclcpp::Time(cloud.header.stamp)); + + + bund_corners[j].x += output_p.point.x; + bund_corners[j].y += output_p.point.y; + bund_corners[j].z += output_p.point.z; + } + master.marker_status[index] = 2; // Used for tracking + } + } + + //Divide to take the average of the summed estimates + if(n_est > 0){ + for(int i=0; i<4; i++){ + bund_corners[i].x /= n_est; + bund_corners[i].y /= n_est; + bund_corners[i].z /= n_est; + } + } -int PlaneFitPoseImprovement(int id, const ARCloud &corners_3D, ARCloud::Ptr selected_points, const ARCloud &cloud, Pose &p){ + return 0; + } + + int PlaneFitPoseImprovement(int id, const ARCloud &corners_3D, ARCloud::Ptr selected_points, const ARCloud &cloud, Pose &p) + { - ata::PlaneFitResult res = ata::fitPlane(selected_points); - gm::PoseStamped pose; - pose.header.stamp = pcl_conversions::fromPCL(cloud.header).stamp; - pose.header.frame_id = cloud.header.frame_id; - pose.pose.position = ata::centroid(*res.inliers); + ata::PlaneFitResult res = ata::fitPlane(selected_points); + gm::msg::PoseStamped pose; + pose.header.stamp = pcl_conversions::fromPCL(cloud.header).stamp; + pose.header.frame_id = cloud.header.frame_id; + pose.pose.position = ata::centroid(*res.inliers); - draw3dPoints(selected_points, cloud.header.frame_id, 1, id, 0.005); - - //Get 2 points that point forward in marker x direction - int i1,i2; - if(isnan(corners_3D[0].x) || isnan(corners_3D[0].y) || isnan(corners_3D[0].z) || - isnan(corners_3D[3].x) || isnan(corners_3D[3].y) || isnan(corners_3D[3].z)) - { - if(isnan(corners_3D[1].x) || isnan(corners_3D[1].y) || isnan(corners_3D[1].z) || - isnan(corners_3D[2].x) || isnan(corners_3D[2].y) || isnan(corners_3D[2].z)) - { - return -1; - } + draw3dPoints(selected_points, cloud.header.frame_id, 1, id, 0.005); + + //Get 2 points that point forward in marker x direction + int i1,i2; + if(isnan(corners_3D[0].x) || isnan(corners_3D[0].y) || isnan(corners_3D[0].z) || + isnan(corners_3D[3].x) || isnan(corners_3D[3].y) || isnan(corners_3D[3].z)) + { + if(isnan(corners_3D[1].x) || isnan(corners_3D[1].y) || isnan(corners_3D[1].z) || + isnan(corners_3D[2].x) || isnan(corners_3D[2].y) || isnan(corners_3D[2].z)) + { + return -1; + } + else{ + i1 = 1; + i2 = 2; + } + } else{ - i1 = 1; - i2 = 2; - } - } - else{ - i1 = 0; - i2 = 3; - } + i1 = 0; + i2 = 3; + } - //Get 2 points the point forward in marker y direction - int i3,i4; - if(isnan(corners_3D[0].x) || isnan(corners_3D[0].y) || isnan(corners_3D[0].z) || - isnan(corners_3D[1].x) || isnan(corners_3D[1].y) || isnan(corners_3D[1].z)) - { - if(isnan(corners_3D[3].x) || isnan(corners_3D[3].y) || isnan(corners_3D[3].z) || - isnan(corners_3D[2].x) || isnan(corners_3D[2].y) || isnan(corners_3D[2].z)) - { - return -1; - } + //Get 2 points the point forward in marker y direction + int i3,i4; + if(isnan(corners_3D[0].x) || isnan(corners_3D[0].y) || isnan(corners_3D[0].z) || + isnan(corners_3D[1].x) || isnan(corners_3D[1].y) || isnan(corners_3D[1].z)) + { + if(isnan(corners_3D[3].x) || isnan(corners_3D[3].y) || isnan(corners_3D[3].z) || + isnan(corners_3D[2].x) || isnan(corners_3D[2].y) || isnan(corners_3D[2].z)) + { + return -1; + } + else{ + i3 = 2; + i4 = 3; + } + } else{ - i3 = 2; - i4 = 3; - } - } - else{ - i3 = 1; - i4 = 0; - } - - ARCloud::Ptr orient_points(new ARCloud()); - orient_points->points.push_back(corners_3D[i1]); - draw3dPoints(orient_points, cloud.header.frame_id, 3, id+1000, 
0.008); + i3 = 1; + i4 = 0; + } - orient_points->clear(); - orient_points->points.push_back(corners_3D[i2]); - draw3dPoints(orient_points, cloud.header.frame_id, 2, id+2000, 0.008); - - int succ; - succ = ata::extractOrientation(res.coeffs, corners_3D[i1], corners_3D[i2], corners_3D[i3], corners_3D[i4], pose.pose.orientation); - if(succ < 0) return -1; - - tf::Matrix3x3 mat; - succ = ata::extractFrame(res.coeffs, corners_3D[i1], corners_3D[i2], corners_3D[i3], corners_3D[i4], mat); - if(succ < 0) return -1; - - drawArrow(pose.pose.position, mat, cloud.header.frame_id, 1, id); - - p.translation[0] = pose.pose.position.x * 100.0; - p.translation[1] = pose.pose.position.y * 100.0; - p.translation[2] = pose.pose.position.z * 100.0; - p.quaternion[1] = pose.pose.orientation.x; - p.quaternion[2] = pose.pose.orientation.y; - p.quaternion[3] = pose.pose.orientation.z; - p.quaternion[0] = pose.pose.orientation.w; + ARCloud::Ptr orient_points(new ARCloud()); + orient_points->points.push_back(corners_3D[i1]); + draw3dPoints(orient_points, cloud.header.frame_id, 3, id+1000, 0.008); + + orient_points->clear(); + orient_points->points.push_back(corners_3D[i2]); + draw3dPoints(orient_points, cloud.header.frame_id, 2, id+2000, 0.008); + + int succ; + succ = ata::extractOrientation(res.coeffs, corners_3D[i1], corners_3D[i2], corners_3D[i3], corners_3D[i4], pose.pose.orientation); + if(succ < 0) return -1; - return 0; -} + tf2::Matrix3x3 mat; + succ = ata::extractFrame(res.coeffs, corners_3D[i1], corners_3D[i2], corners_3D[i3], corners_3D[i4], mat); + if(succ < 0) return -1; + drawArrow(pose.pose.position, mat, cloud.header.frame_id, 1, id); -// Updates the bundlePoses of the multi_marker_bundles by detecting markers and -// using all markers in a bundle to infer the master tag's position -void GetMultiMarkerPoses(IplImage *image, ARCloud &cloud) { + p.translation[0] = pose.pose.position.x * 100.0; + p.translation[1] = pose.pose.position.y * 100.0; + p.translation[2] = pose.pose.position.z * 100.0; + p.quaternion[1] = pose.pose.orientation.x; + p.quaternion[2] = pose.pose.orientation.y; + p.quaternion[3] = pose.pose.orientation.z; + p.quaternion[0] = pose.pose.orientation.w; - for(int i=0; isize(); i++) - { - vector > pixels; - Marker *m = &((*marker_detector.markers)[i]); - int id = m->GetId(); - //cout << "******* ID: " << id << endl; + + + // Updates the bundlePoses of the multi_marker_bundles by detecting markers and + // using all markers in a bundle to infer the master tag's position + void GetMultiMarkerPoses(cv::Mat *image, ARCloud &cloud) { + + for(int i=0; isize(); i++) + { + vector > pixels; + Marker *m = &((*marker_detector.markers)[i]); + int id = m->GetId(); + + //Get the 3D inner corner points - more stable than outer corners that can "fall off" object + int resol = m->GetRes(); + int ori = m->ros_orientation; + + PointDouble pt1, pt2, pt3, pt4; + pt4 = m->ros_marker_points_img[0]; + pt3 = m->ros_marker_points_img[resol-1]; + pt1 = m->ros_marker_points_img[(resol*resol)-resol]; + pt2 = m->ros_marker_points_img[(resol*resol)-1]; - //Get the 3D points of the outer corners - /* - PointDouble corner0 = m->marker_corners_img[0]; - PointDouble corner1 = m->marker_corners_img[1]; - PointDouble corner2 = m->marker_corners_img[2]; - PointDouble corner3 = m->marker_corners_img[3]; - m->ros_corners_3D[0] = cloud(corner0.x, corner0.y); - m->ros_corners_3D[1] = cloud(corner1.x, corner1.y); - m->ros_corners_3D[2] = cloud(corner2.x, corner2.y); - m->ros_corners_3D[3] = cloud(corner3.x, corner3.y); - */ - - 
//Get the 3D inner corner points - more stable than outer corners that can "fall off" object - int resol = m->GetRes(); - int ori = m->ros_orientation; + m->ros_corners_3D[0] = cloud(pt1.x, pt1.y); + m->ros_corners_3D[1] = cloud(pt2.x, pt2.y); + m->ros_corners_3D[2] = cloud(pt3.x, pt3.y); + m->ros_corners_3D[3] = cloud(pt4.x, pt4.y); - PointDouble pt1, pt2, pt3, pt4; - pt4 = m->ros_marker_points_img[0]; - pt3 = m->ros_marker_points_img[resol-1]; - pt1 = m->ros_marker_points_img[(resol*resol)-resol]; - pt2 = m->ros_marker_points_img[(resol*resol)-1]; - - m->ros_corners_3D[0] = cloud(pt1.x, pt1.y); - m->ros_corners_3D[1] = cloud(pt2.x, pt2.y); - m->ros_corners_3D[2] = cloud(pt3.x, pt3.y); - m->ros_corners_3D[3] = cloud(pt4.x, pt4.y); - - if(ori >= 0 && ori < 4){ - if(ori != 0){ - std::rotate(m->ros_corners_3D.begin(), m->ros_corners_3D.begin() + ori, m->ros_corners_3D.end()); - } - } - else - ROS_ERROR("FindMarkerBundles: Bad Orientation: %i for ID: %i", ori, id); - - //Check if we have spotted a master tag - int master_ind = -1; - for(int j=0; j= 0 && ori < 4){ + if(ori != 0){ + std::rotate(m->ros_corners_3D.begin(), m->ros_corners_3D.begin() + ori, m->ros_corners_3D.end()); } } + else + RCLCPP_ERROR(this->get_logger(), "FindMarkerBundles: Bad Orientation: %i for ID: %i", ori, id); + + //Check if we have spotted a master tag + int master_ind = -1; + for(int j=0; jros_marker_points_img) - pixels.push_back(cv::Point(p.x, p.y)); - ARCloud::Ptr selected_points = ata::filterCloud(cloud, pixels); + //Mark the bundle that marker belongs to as "seen" + int bundle_ind = -1; + for(int j=0; jros_corners_3D, selected_points, cloud, m->pose); - - //If the plane fit fails... - if(ret < 0){ - //Mark this tag as invalid - m->valid = false; - //If this was a master tag, reset its visibility - if(master_ind >= 0) - master_visible[master_ind] = false; - //decrement the number of markers seen in this bundle - bundles_seen[bundle_ind] -= 1; - - } - else - m->valid = true; - } - - //For each master tag, infer the 3D position of its corners from other visible tags - //Then, do a plane fit to those new corners - ARCloud inferred_corners; - for(int i=0; i 0){ - //if(master_visible[i] == false){ - if(InferCorners(cloud, *(multi_marker_bundles[i]), inferred_corners) >= 0){ - ARCloud::Ptr inferred_cloud(new ARCloud(inferred_corners)); - PlaneFitPoseImprovement(i+5000, inferred_corners, inferred_cloud, cloud, bundlePoses[i]); - } - //} - //If master is visible, use it directly instead of inferring pose - //else{ - // for (size_t j=0; jsize(); j++){ - // Marker *m = &((*marker_detector.markers)[j]); - // if(m->GetId() == master_id[i]) - // bundlePoses[i] = m->pose; - // } - //} - Pose ret_pose; - if(med_filt_size > 0){ - med_filts[i]->addPose(bundlePoses[i]); - med_filts[i]->getMedian(ret_pose); - bundlePoses[i] = ret_pose; - } - } + //Get the 3D marker points + BOOST_FOREACH (const PointDouble& p, m->ros_marker_points_img) + pixels.push_back(cv::Point(p.x, p.y)); + ARCloud::Ptr selected_points = ata::filterCloud(cloud, pixels); + + //Use the kinect data to find a plane and pose for the marker + int ret = PlaneFitPoseImprovement(i, m->ros_corners_3D, selected_points, cloud, m->pose); + + //If the plane fit fails... 
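+          // (PlaneFitPoseImprovement returns -1 when the corner points are
+          // NaN or when no orientation could be extracted from the fitted
+          // plane; the marker is then treated as invalid below.)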
+ if(ret < 0){ + //Mark this tag as invalid + m->valid = false; + //If this was a master tag, reset its visibility + if(master_ind >= 0) + master_visible[master_ind] = false; + //decrement the number of markers seen in this bundle + bundles_seen[bundle_ind] -= 1; + + } + else + m->valid = true; + } + + //For each master tag, infer the 3D position of its corners from other visible tags + //Then, do a plane fit to those new corners + ARCloud inferred_corners; + for(int i=0; i 0){ + //if(master_visible[i] == false){ + if(InferCorners(cloud, *(multi_marker_bundles[i]), inferred_corners) >= 0){ + ARCloud::Ptr inferred_cloud(new ARCloud(inferred_corners)); + PlaneFitPoseImprovement(i+5000, inferred_corners, inferred_cloud, cloud, bundlePoses[i]); + } + + Pose ret_pose; + if(med_filt_size > 0){ + med_filts[i]->addPose(bundlePoses[i]); + med_filts[i]->getMedian(ret_pose); + bundlePoses[i] = ret_pose; + } + } + } } } -} -// Given the pose of a marker, builds the appropriate ROS messages for later publishing -void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::ImageConstPtr image_msg, tf::StampedTransform &CamToOutput, visualization_msgs::Marker *rvizMarker, ar_track_alvar_msgs::AlvarMarker *ar_pose_marker, int confidence){ - double px,py,pz,qx,qy,qz,qw; - - px = p.translation[0]/100.0; - py = p.translation[1]/100.0; - pz = p.translation[2]/100.0; - qx = p.quaternion[1]; - qy = p.quaternion[2]; - qz = p.quaternion[3]; - qw = p.quaternion[0]; - - //Get the marker pose in the camera frame - tf::Quaternion rotation (qx,qy,qz,qw); - tf::Vector3 origin (px,py,pz); - tf::Transform t (rotation, origin); //transform from cam to marker - - tf::Vector3 markerOrigin (0, 0, 0); - tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin); - tf::Transform markerPose = t * m; - - //Publish the cam to marker transform for each marker - std::string markerFrame = "ar_marker_"; - std::stringstream out; - out << id; - std::string id_string = out.str(); - markerFrame += id_string; - tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame.c_str()); - tf_broadcaster->sendTransform(camToMarker); - - //Create the rviz visualization message - tf::poseTFToMsg (markerPose, rvizMarker->pose); - rvizMarker->header.frame_id = image_msg->header.frame_id; - rvizMarker->header.stamp = image_msg->header.stamp; - rvizMarker->id = id; - - rvizMarker->scale.x = 1.0 * marker_size/100.0; - rvizMarker->scale.y = 1.0 * marker_size/100.0; - rvizMarker->scale.z = 0.2 * marker_size/100.0; - - if(type==MAIN_MARKER) - rvizMarker->ns = "main_shapes"; - else - rvizMarker->ns = "basic_shapes"; - - - rvizMarker->type = visualization_msgs::Marker::CUBE; - rvizMarker->action = visualization_msgs::Marker::ADD; - - //Determine a color and opacity, based on marker type - if(type==MAIN_MARKER){ - rvizMarker->color.r = 1.0f; - rvizMarker->color.g = 0.0f; - rvizMarker->color.b = 0.0f; - rvizMarker->color.a = 1.0; - } - else if(type==VISIBLE_MARKER){ - rvizMarker->color.r = 0.0f; - rvizMarker->color.g = 1.0f; - rvizMarker->color.b = 0.0f; - rvizMarker->color.a = 0.7; - } - else if(type==GHOST_MARKER){ - rvizMarker->color.r = 0.0f; - rvizMarker->color.g = 0.0f; - rvizMarker->color.b = 1.0f; - rvizMarker->color.a = 0.5; - } + // Given the pose of a marker, builds the appropriate ROS messages for later publishing + void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::msg::Image image_msg, geometry_msgs::msg::TransformStamped &CamToOutput, visualization_msgs::msg::Marker *rvizMarker, 
ar_track_alvar_msgs::msg::AlvarMarker *ar_pose_marker, int confidence) + { + double px,py,pz,qx,qy,qz,qw; + + px = p.translation[0]/100.0; + py = p.translation[1]/100.0; + pz = p.translation[2]/100.0; + qx = p.quaternion[1]; + qy = p.quaternion[2]; + qz = p.quaternion[3]; + qw = p.quaternion[0]; + + //Get the marker pose in the camera frame + tf2::Quaternion rotation (qx,qy,qz,qw); + tf2::Vector3 origin (px,py,pz); + tf2::Transform t (rotation, origin); //transform from cam to marker + + tf2::Vector3 markerOrigin (0, 0, 0); + tf2::Transform m (tf2::Quaternion::getIdentity (), markerOrigin); + tf2::Transform markerPose = t * m; + + //Publish the cam to marker transform for each marker + std::string markerFrame = "ar_marker_"; + std::stringstream out; + out << id; + std::string id_string = out.str(); + markerFrame += id_string; + geometry_msgs::msg::TransformStamped camToMarker; + camToMarker.header.stamp = image_msg.header.stamp; + camToMarker.header.frame_id = image_msg.header.frame_id; + camToMarker.child_frame_id = markerFrame; + + geometry_msgs::msg::Vector3 trans; + trans.x = px; + trans.y = py; + trans.z = pz; + geometry_msgs::msg::Quaternion rot; + rot.x = qx; + rot.y = qy; + rot.z = qz; + rot.w = qw; + + camToMarker.transform.translation = trans; + camToMarker.transform.rotation = rot; + tf_broadcaster_->sendTransform(camToMarker); + + //Create the rviz visualization message + rvizMarker->pose.position.x = markerPose.getOrigin().getX(); + rvizMarker->pose.position.y = markerPose.getOrigin().getY(); + rvizMarker->pose.position.z = markerPose.getOrigin().getZ(); + rvizMarker->pose.orientation.x = markerPose.getRotation().getX(); + rvizMarker->pose.orientation.y = markerPose.getRotation().getY(); + rvizMarker->pose.orientation.z = markerPose.getRotation().getZ(); + rvizMarker->pose.orientation.w = markerPose.getRotation().getW(); + + rvizMarker->header.frame_id = image_msg.header.frame_id; + rvizMarker->header.stamp = image_msg.header.stamp; + rvizMarker->id = id; + + rvizMarker->scale.x = 1.0 * marker_size/100.0; + rvizMarker->scale.y = 1.0 * marker_size/100.0; + rvizMarker->scale.z = 0.2 * marker_size/100.0; + + if(type==MAIN_MARKER) + rvizMarker->ns = "main_shapes"; + else + rvizMarker->ns = "basic_shapes"; + + + rvizMarker->type = visualization_msgs::msg::Marker::CUBE; + rvizMarker->action = visualization_msgs::msg::Marker::ADD; + + //Determine a color and opacity, based on marker type + if(type==MAIN_MARKER){ + rvizMarker->color.r = 1.0f; + rvizMarker->color.g = 0.0f; + rvizMarker->color.b = 0.0f; + rvizMarker->color.a = 1.0; + } + else if(type==VISIBLE_MARKER){ + rvizMarker->color.r = 0.0f; + rvizMarker->color.g = 1.0f; + rvizMarker->color.b = 0.0f; + rvizMarker->color.a = 0.7; + } + else if(type==GHOST_MARKER){ + rvizMarker->color.r = 0.0f; + rvizMarker->color.g = 0.0f; + rvizMarker->color.b = 1.0f; + rvizMarker->color.a = 0.5; + } + + rvizMarker->lifetime = rclcpp::Duration (0.1); - rvizMarker->lifetime = ros::Duration (0.1); + // Only publish the pose of the master tag in each bundle, since that's all we really care about aside from visualization + if(type==MAIN_MARKER){ + //Take the pose of the tag in the camera frame and convert to the output frame (usually torso_lift_link for the PR2) + //tf2::Transform tagPoseOutput = CamToOutput * markerPose; - // Only publish the pose of the master tag in each bundle, since that's all we really care about aside from visualization - if(type==MAIN_MARKER){ - //Take the pose of the tag in the camera frame and convert to the output frame 
(usually torso_lift_link for the PR2) - tf::Transform tagPoseOutput = CamToOutput * markerPose; + // Create the pose marker message + // tf2::poseTFToMsg (tagPoseOutput, ar_pose_marker->pose.pose); - //Create the pose marker message - tf::poseTFToMsg (tagPoseOutput, ar_pose_marker->pose.pose); - ar_pose_marker->header.frame_id = output_frame; - ar_pose_marker->header.stamp = image_msg->header.stamp; - ar_pose_marker->id = id; - ar_pose_marker->confidence = confidence; + tf2::doTransform(ar_pose_marker->pose, ar_pose_marker->pose,CamToOutput); + ar_pose_marker->header.frame_id = output_frame; + ar_pose_marker->header.stamp = image_msg.header.stamp; + ar_pose_marker->id = id; + ar_pose_marker->confidence = confidence; + } + else + ar_pose_marker = NULL; } - else - ar_pose_marker = NULL; -} -//Callback to handle getting kinect point clouds and processing them -void getPointCloudCallback (const sensor_msgs::PointCloud2ConstPtr &msg) -{ - sensor_msgs::ImagePtr image_msg(new sensor_msgs::Image); + //Callback to handle getting kinect point clouds and processing them + void getPointCloudCallback (const sensor_msgs::msg::PointCloud2::SharedPtr msg) + { + sensor_msgs::msg::Image image_msg; - //If we've already gotten the cam info, then go ahead - if(cam->getCamInfo_){ - try{ - //Get the transformation from the Camera to the output frame for this image capture - tf::StampedTransform CamToOutput; + //If we've already gotten the cam info, then go ahead + if(cam->getCamInfo_){ try{ - tf_listener->waitForTransform(output_frame, msg->header.frame_id, msg->header.stamp, ros::Duration(1.0)); - tf_listener->lookupTransform(output_frame, msg->header.frame_id, msg->header.stamp, CamToOutput); - } - catch (tf::TransformException ex){ - ROS_ERROR("%s",ex.what()); - } + //Get the transformation from the Camera to the output frame for this image capture + std::string tf_error; + geometry_msgs::msg::TransformStamped CamToOutput; + try + { + tf2::TimePoint tf2_time = tf2_ros::fromMsg(image_msg.header.stamp); + CamToOutput = tf2_->lookupTransform(output_frame, image_msg.header.frame_id,tf2_time,tf2_time - prev_stamp_); + } + catch (tf2::TransformException ex){ + RCLCPP_ERROR(this->get_logger(), "%s",ex.what()); + } - //Init and clear visualization markers - visualization_msgs::Marker rvizMarker; - ar_track_alvar_msgs::AlvarMarker ar_pose_marker; - arPoseMarkers_.markers.clear (); + //Init and clear visualization markers + visualization_msgs::msg::Marker rvizMarker; + ar_track_alvar_msgs::msg::AlvarMarker ar_pose_marker; + arPoseMarkers_.markers.clear (); - //Convert cloud to PCL - ARCloud cloud; - pcl::fromROSMsg(*msg, cloud); - - //Get an OpenCV image from the cloud - pcl::toROSMsg (*msg, *image_msg); - image_msg->header.stamp = msg->header.stamp; - image_msg->header.frame_id = msg->header.frame_id; - - //Convert the image - cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); - - //Get the estimated pose of the main markers by using all the markers in each bundle - - // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives - // us a cv::Mat. 
I'm too lazy to change to cv::Mat throughout right now, so I - // do this conversion here -jbinney - IplImage ipl_image = cv_ptr_->image; - GetMultiMarkerPoses(&ipl_image, cloud); - - for (size_t i=0; isize(); i++) - { - int id = (*(marker_detector.markers))[i].GetId(); - - // Draw if id is valid - if(id >= 0){ - - // Don't draw if it is a master tag...we do this later, a bit differently - bool should_draw = true; - for(int j=0; j 0){ - makeMarkerMsgs(MAIN_MARKER, master_id[i], bundlePoses[i], image_msg, CamToOutput, &rvizMarker, &ar_pose_marker, bundles_seen[i]); - rvizMarkerPub_.publish (rvizMarker); - arPoseMarkers_.markers.push_back (ar_pose_marker); - } - } - - //Publish the marker messages - arMarkerPub_.publish (arPoseMarkers_); - } - catch (cv_bridge::Exception& e){ - ROS_ERROR ("ar_track_alvar: Image error: %s", image_msg->encoding.c_str ()); - } - } -} + //Convert cloud to PCL + ARCloud cloud; + pcl::fromROSMsg(*msg, cloud); + //Get an OpenCV image from the cloud + pcl::toROSMsg (*msg, image_msg); + image_msg.header.stamp = msg->header.stamp; + image_msg.header.frame_id = msg->header.frame_id; + + //Convert the image + cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); -//Create a ROS frame out of the known corners of a tag in the weird marker coord frame used by Alvar markers (x right y forward z up) -//p0-->p1 should point in Alvar's pos X direction -//p1-->p2 should point in Alvar's pos Y direction -int makeMasterTransform (const CvPoint3D64f& p0, const CvPoint3D64f& p1, - const CvPoint3D64f& p2, const CvPoint3D64f& p3, - tf::Transform &retT) - { - const tf::Vector3 q0(p0.x, p0.y, p0.z); - const tf::Vector3 q1(p1.x, p1.y, p1.z); - const tf::Vector3 q2(p2.x, p2.y, p2.z); - const tf::Vector3 q3(p3.x, p3.y, p3.z); - - // (inverse) matrix with the given properties - const tf::Vector3 v = (q1-q0).normalized(); - const tf::Vector3 w = (q2-q1).normalized(); - const tf::Vector3 n = v.cross(w); - tf::Matrix3x3 m(v[0], v[1], v[2], w[0], w[1], w[2], n[0], n[1], n[2]); - m = m.inverse(); - - //Translate to quaternion - if(m.determinant() <= 0) - return -1; - - //Use Eigen for this part instead, because the ROS version of bullet appears to have a bug - Eigen::Matrix3f eig_m; - for(int i=0; i<3; i++){ - for(int j=0; j<3; j++){ - eig_m(i,j) = m[i][j]; - } - } - Eigen::Quaternion eig_quat(eig_m); - - // Translate back to bullet - tfScalar ex = eig_quat.x(); - tfScalar ey = eig_quat.y(); - tfScalar ez = eig_quat.z(); - tfScalar ew = eig_quat.w(); - tf::Quaternion quat(ex,ey,ez,ew); - quat = quat.normalized(); - - double qx = (q0.x() + q1.x() + q2.x() + q3.x()) / 4.0; - double qy = (q0.y() + q1.y() + q2.y() + q3.y()) / 4.0; - double qz = (q0.z() + q1.z() + q2.z() + q3.z()) / 4.0; - tf::Vector3 origin (qx,qy,qz); - - tf::Transform tform (quat, origin); //transform from master to marker - retT = tform; - - return 0; - } + //Get the estimated pose of the main markers by using all the markers in each bundle + // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives + // us a cv::Mat. 
I'm too lazy to change to cv::Mat throughout right now, so I + // do this conversion here -jbinney + cv::Mat ipl_image = cv_ptr_->image; + GetMultiMarkerPoses(&ipl_image, cloud); -//Find the coordinates of the Master marker with respect to the coord frame of each of it's child markers -//This data is used for later estimation of the Master marker pose from the child poses -int calcAndSaveMasterCoords(MultiMarkerBundle &master) -{ - int mast_id = master.master_id; - std::vector rel_corner_coords; - - //Go through all the markers associated with this bundle - for (size_t i=0; isize(); i++) + { + int id = (*(marker_detector.markers))[i].GetId(); + + // Draw if id is valid + if(id >= 0) + { + + // Don't draw if it is a master tag...we do this later, a bit differently + bool should_draw = true; + for(int j=0; jpublish (rvizMarker); + } + } } - //Use them to find a transform from the master frame to the child frame - tf::Transform tform; - makeMasterTransform(mark_corners[0], mark_corners[1], mark_corners[2], mark_corners[3], tform); - - //Finally, find the coords of the corners of the master in the child frame - for(int j=0; j<4; j++){ - - CvPoint3D64f corner_coord = master.pointcloud[master.pointcloud_index(mast_id, j)]; - double px = corner_coord.x; - double py = corner_coord.y; - double pz = corner_coord.z; - - tf::Vector3 corner_vec (px, py, pz); - tf::Vector3 ans = (tform.inverse()) * corner_vec; - rel_corner_coords.push_back(ans); + //Draw the main markers, whether they are visible or not -- but only if at least 1 marker from their bundle is currently seen + for(int i=0; i 0) + { + makeMarkerMsgs(MAIN_MARKER, master_id[i], bundlePoses[i], image_msg, CamToOutput, &rvizMarker, &ar_pose_marker, bundles_seen[i]); + rvizMarkerPub_->publish (rvizMarker); + arPoseMarkers_.markers.push_back (ar_pose_marker); + } } - - master.rel_corners.push_back(rel_corner_coords); - } - - return 0; -} + //Publish the marker messages + arMarkerPub_->publish (arPoseMarkers_); + } -int main(int argc, char *argv[]) -{ - ros::init (argc, argv, "marker_detect"); - ros::NodeHandle n; - - if(argc < 9){ - std::cout << std::endl; - cout << "Not enough arguments provided." 
<< endl; - cout << "Usage: ./findMarkerBundles " << endl; - std::cout << std::endl; - return 0; + catch (cv_bridge::Exception& e) + { + RCLCPP_ERROR (this->get_logger(),"ar_track_alvar: Image error: %s", image_msg.encoding.c_str ()); + } + prev_stamp_ = tf2_ros::fromMsg(image_msg.header.stamp); + } } - // Get params from command line - marker_size = atof(argv[1]); - max_new_marker_error = atof(argv[2]); - max_track_error = atof(argv[3]); - cam_image_topic = argv[4]; - cam_info_topic = argv[5]; - output_frame = argv[6]; - med_filt_size = atoi(argv[7]); - int n_args_before_list = 8; - n_bundles = argc - n_args_before_list; - - marker_detector.SetMarkerSize(marker_size); - multi_marker_bundles = new MultiMarkerBundle*[n_bundles]; - bundlePoses = new Pose[n_bundles]; - master_id = new int[n_bundles]; - bundle_indices = new std::vector[n_bundles]; - bundles_seen = new int[n_bundles]; - master_visible = new bool[n_bundles]; - - //Create median filters - med_filts = new ata::MedianFilter*[n_bundles]; - for(int i=0; i id_vector = loadHelper.getIndices(); - multi_marker_bundles[i] = new MultiMarkerBundle(id_vector); - multi_marker_bundles[i]->Load(argv[i + n_args_before_list], FILE_FORMAT_XML); - master_id[i] = multi_marker_bundles[i]->getMasterId(); - bundle_indices[i] = multi_marker_bundles[i]->getIndices(); - calcAndSaveMasterCoords(*(multi_marker_bundles[i])); - } - else{ - cout<<"Cannot load file "<< argv[i + n_args_before_list] << endl; + //Create a ROS frame out of the known corners of a tag in the weird marker coord frame used by Alvar markers (x right y forward z up) + //p0-->p1 should point in Alvar's pos X direction + //p1-->p2 should point in Alvar's pos Y direction + int makeMasterTransform (const cv::Point3f& p0, const cv::Point3f& p1, + const cv::Point3f& p2, const cv::Point3f& p3, + tf2::Transform &retT) + { + const tf2::Vector3 q0(p0.x, p0.y, p0.z); + const tf2::Vector3 q1(p1.x, p1.y, p1.z); + const tf2::Vector3 q2(p2.x, p2.y, p2.z); + const tf2::Vector3 q3(p3.x, p3.y, p3.z); + + // (inverse) matrix with the given properties + const tf2::Vector3 v = (q1-q0).normalized(); + const tf2::Vector3 w = (q2-q1).normalized(); + const tf2::Vector3 n = v.cross(w); + tf2::Matrix3x3 m(v[0], v[1], v[2], w[0], w[1], w[2], n[0], n[1], n[2]); + m = m.inverse(); + + //Translate to quaternion + if(m.determinant() <= 0) + return -1; + + //Use Eigen for this part instead, because the ROS version of bullet appears to have a bug + Eigen::Matrix3f eig_m; + for(int i=0; i<3; i++){ + for(int j=0; j<3; j++){ + eig_m(i,j) = m[i][j]; + } + } + Eigen::Quaternion eig_quat(eig_m); + + // Translate back to bullet + tf2Scalar ex = eig_quat.x(); + tf2Scalar ey = eig_quat.y(); + tf2Scalar ez = eig_quat.z(); + tf2Scalar ew = eig_quat.w(); + tf2::Quaternion quat(ex,ey,ez,ew); + quat = quat.normalized(); + + double qx = (q0.x() + q1.x() + q2.x() + q3.x()) / 4.0; + double qy = (q0.y() + q1.y() + q2.y() + q3.y()) / 4.0; + double qz = (q0.z() + q1.z() + q2.z() + q3.z()) / 4.0; + tf2::Vector3 origin (qx,qy,qz); + + tf2::Transform tform (quat, origin); //transform from master to marker + retT = tform; + return 0; - } - } - - // Set up camera, listeners, and broadcasters - cam = new Camera(n, cam_info_topic); - tf_listener = new tf::TransformListener(n); - tf_broadcaster = new tf::TransformBroadcaster(); - arMarkerPub_ = n.advertise < ar_track_alvar_msgs::AlvarMarkers > ("ar_pose_marker", 0); - rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0); - rvizMarkerPub2_ = n.advertise < 
visualization_msgs::Marker > ("ARmarker_points", 0); - - //Give tf a chance to catch up before the camera callback starts asking for transforms - ros::Duration(1.0).sleep(); - ros::spinOnce(); - - //Subscribe to topics and set up callbacks - ROS_INFO ("Subscribing to image topic"); - cloud_sub_ = n.subscribe(cam_image_topic, 1, &getPointCloudCallback); + } - ros::spin(); + //Find the coordinates of the Master marker with respect to the coord frame of each of it's child markers + //This data is used for later estimation of the Master marker pose from the child poses + int calcAndSaveMasterCoords(MultiMarkerBundle &master) + { + int mast_id = master.master_id; + std::vector rel_corner_coords; + + //Go through all the markers associated with this bundle + for (size_t i=0; i(argc,argv)); + rclcpp::shutdown(); return 0; } diff --git a/ar_track_alvar/nodes/FindMarkerBundlesNoKinect.cpp b/ar_track_alvar/nodes/FindMarkerBundlesNoKinect.cpp index 013588b..dba92de 100644 --- a/ar_track_alvar/nodes/FindMarkerBundlesNoKinect.cpp +++ b/ar_track_alvar/nodes/FindMarkerBundlesNoKinect.cpp @@ -41,11 +41,15 @@ #include "ar_track_alvar/MultiMarkerInitializer.h" #include "ar_track_alvar/Shared.h" #include -#include -#include -#include -#include -#include +#include +#include +#include "tf2_ros/buffer.h" +#include "tf2_ros/transform_listener.h" +#include +#include +#include +#include +#include "tf2_ros/create_timer_ros.h" using namespace alvar; using namespace std; @@ -54,292 +58,384 @@ using namespace std; #define VISIBLE_MARKER 2 #define GHOST_MARKER 3 -Camera *cam; -cv_bridge::CvImagePtr cv_ptr_; -image_transport::Subscriber cam_sub_; -ros::Publisher arMarkerPub_; -ros::Publisher rvizMarkerPub_; -ar_track_alvar_msgs::AlvarMarkers arPoseMarkers_; -tf::TransformListener *tf_listener; -tf::TransformBroadcaster *tf_broadcaster; -MarkerDetector marker_detector; -MultiMarkerBundle **multi_marker_bundles=NULL; -Pose *bundlePoses; -int *master_id; -bool *bundles_seen; -std::vector *bundle_indices; -bool init = true; - -double marker_size; -double max_new_marker_error; -double max_track_error; -std::string cam_image_topic; -std::string cam_info_topic; -std::string output_frame; -int n_bundles = 0; - -void GetMultiMarkerPoses(IplImage *image); -void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg); -void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::ImageConstPtr image_msg, tf::StampedTransform &CamToOutput, visualization_msgs::Marker *rvizMarker, ar_track_alvar_msgs::AlvarMarker *ar_pose_marker); - - -// Updates the bundlePoses of the multi_marker_bundles by detecting markers and using all markers in a bundle to infer the master tag's position -void GetMultiMarkerPoses(IplImage *image) { - - if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)){ - for(int i=0; iUpdate(marker_detector.markers, cam, bundlePoses[i]); - - if(marker_detector.DetectAdditional(image, cam, false) > 0){ - for(int i=0; iSetTrackMarkers(marker_detector, cam, bundlePoses[i], image) > 0)) - multi_marker_bundles[i]->Update(marker_detector.markers, cam, bundlePoses[i]); - } - } - } -} +class FindMarkerBundlesNoKinect : public rclcpp::Node +{ + private: + + Camera *cam; + cv_bridge::CvImagePtr cv_ptr_; + image_transport::Subscriber cam_sub_; + ar_track_alvar_msgs::msg::AlvarMarkers arPoseMarkers_; + + tf2::TimePoint prev_stamp_; + std::shared_ptr tf2_; + std::shared_ptr tf_listener_; + std::shared_ptr tf_broadcaster_; + + rclcpp::Publisher::SharedPtr arMarkerPub_; + 
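+      // arMarkerPub_ publishes the estimated bundle poses (AlvarMarkers);
+      // rvizMarkerPub_ below mirrors them as visualization markers for rviz.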
+      rclcpp::Publisher<visualization_msgs::msg::Marker>::SharedPtr rvizMarkerPub_;
+      rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr info_sub_;
+
+
+      MarkerDetector<MarkerData> marker_detector;
+      MultiMarkerBundle **multi_marker_bundles=NULL;
+      Pose *bundlePoses;
+      int *master_id;
+      bool *bundles_seen;
+      std::vector<int> *bundle_indices;
+      bool init = true;
+
+      double marker_size;
+      double max_new_marker_error;
+      double max_track_error;
+      std::string cam_image_topic;
+      std::string cam_info_topic;
+      std::string output_frame;
+      int n_bundles = 0;
+
+    public:
+
+      FindMarkerBundlesNoKinect(int argc, char* argv[]):Node("marker_detect")
+      {
+
+        rclcpp::Clock::SharedPtr clock = this->get_clock();
+        tf2_ = std::make_shared<tf2_ros::Buffer>(clock);
+        tf_listener_ = std::make_shared<tf2_ros::TransformListener>(*tf2_);
+        tf_broadcaster_ = std::make_shared<tf2_ros::TransformBroadcaster>(*this);
+        prev_stamp_ = tf2::get_now();
+
+        if(argc < 8)
+        {
+          std::cout << std::endl;
+          cout << "Not enough arguments provided." << endl;
+          cout << "Usage: ./findMarkerBundles <marker size in cm> <max new marker error> <max track error> <cam image topic> <cam info topic> <output frame> <list of bundle XML files...>" << endl;
+          std::cout << std::endl;
+          exit(EXIT_FAILURE);
+        }
+
+        // Get params from command line
+        marker_size = atof(argv[1]);
+        max_new_marker_error = atof(argv[2]);
+        max_track_error = atof(argv[3]);
+        cam_image_topic = argv[4];
+        cam_info_topic = argv[5];
+        output_frame = argv[6];
+        int n_args_before_list = 7;
+        n_bundles = 0;
+        std::string argument;
+        // stop before the --ros-args argument
+        for (int j=n_args_before_list; j<argc; j++)
+        {
+          argument = argv[j];
+          if (argument.compare("--ros-args") != 0)
+          {
+            n_bundles++;
+          }
+          else
+          {
+            break;
+          }
+        }
+
+        marker_detector.SetMarkerSize(marker_size);
+        multi_marker_bundles = new MultiMarkerBundle*[n_bundles];
+        bundlePoses = new Pose[n_bundles];
+        master_id = new int[n_bundles];
+        bundle_indices = new std::vector<int>[n_bundles];
+        bundles_seen = new bool[n_bundles];
+
+        // Load the marker bundle XML files
+        for(int i=0; i<n_bundles; i++){
+          MultiMarker loadHelper;
+          if(loadHelper.Load(argv[i + n_args_before_list], FILE_FORMAT_XML)){
+            vector<int> id_vector = loadHelper.getIndices();
+            multi_marker_bundles[i] = new MultiMarkerBundle(id_vector);
+            multi_marker_bundles[i]->Load(argv[i + n_args_before_list], FILE_FORMAT_XML);
+            master_id[i] = multi_marker_bundles[i]->getMasterId();
+            bundle_indices[i] = multi_marker_bundles[i]->getIndices();
+          }
+          else{
+            cout<<"Cannot load file "<< argv[i + n_args_before_list] << endl;
+            exit(EXIT_FAILURE);
+          }
+        }
+
+
+        // Set up camera, listeners, and broadcasters
+        cam = new Camera();
+
+        // Publishers: create the member publishers (re-declaring them as locals
+        // would leave the members null and crash on the first publish)
+        arMarkerPub_ = this->create_publisher<ar_track_alvar_msgs::msg::AlvarMarkers>("ar_pose_marker", 0);
+        rvizMarkerPub_ = this->create_publisher<visualization_msgs::msg::Marker>("visualization_marker", 0);
+
+
+        //Give tf a chance to catch up before the camera callback starts asking for transforms
+        //TODO: come back to this, there's probably a better way to do this
+        rclcpp::Rate loop_rate(100);
+        loop_rate.sleep();
+
+
+        //Subscribe to topics and set up callbacks
+        RCLCPP_INFO (this->get_logger(),"Subscribing to image topic");
+        cam_sub_ = image_transport::create_subscription(this, cam_image_topic, [&](auto& msg) { this->getCapCallback(msg); }, "raw");
+
+        RCLCPP_INFO(this->get_logger(),"Subscribing to info topic");
+        info_sub_ = this->create_subscription<sensor_msgs::msg::CameraInfo>(cam_info_topic, 1, std::bind(&FindMarkerBundlesNoKinect::InfoCallback, this, std::placeholders::_1));

-// Given the pose of a marker, builds the appropriate ROS messages for later publishing
-void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::ImageConstPtr image_msg, tf::StampedTransform &CamToOutput, visualization_msgs::Marker *rvizMarker, ar_track_alvar_msgs::AlvarMarker *ar_pose_marker){
-  double px,py,pz,qx,qy,qz,qw;
-
-  px = p.translation[0]/100.0;
-  py = p.translation[1]/100.0;
-  pz = p.translation[2]/100.0;
-  qx = p.quaternion[1];
-  qy = p.quaternion[2];
-  qz = p.quaternion[3];
-  qw = p.quaternion[0];
-
-  //Get the marker pose in the camera frame
-  tf::Quaternion rotation (qx,qy,qz,qw);
-  tf::Vector3 origin (px,py,pz);
-  tf::Transform t (rotation, origin);  //transform from cam to marker
-
-  tf::Vector3 markerOrigin (0, 0, 0);
-  tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
-  tf::Transform markerPose = t * m;
-
-  //Publish
the cam to marker transform for main marker in each bundle - if(type==MAIN_MARKER){ - std::string markerFrame = "ar_marker_"; - std::stringstream out; - out << id; - std::string id_string = out.str(); - markerFrame += id_string; - tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame.c_str()); - tf_broadcaster->sendTransform(camToMarker); - } - //Create the rviz visualization message - tf::poseTFToMsg (markerPose, rvizMarker->pose); - rvizMarker->header.frame_id = image_msg->header.frame_id; - rvizMarker->header.stamp = image_msg->header.stamp; - rvizMarker->id = id; + } - rvizMarker->scale.x = 1.0 * marker_size/100.0; - rvizMarker->scale.y = 1.0 * marker_size/100.0; - rvizMarker->scale.z = 0.2 * marker_size/100.0; + // Updates the bundlePoses of the multi_marker_bundles by detecting markers and using all markers in a bundle to infer the master tag's position + void GetMultiMarkerPoses(cv::Mat *image) { + + if (marker_detector.Detect(*image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)){ + for(int i=0; iUpdate(marker_detector.markers, cam, bundlePoses[i]); + + if(marker_detector.DetectAdditional(*image, cam, false) > 0){ + for(int i=0; iSetTrackMarkers(marker_detector, cam, bundlePoses[i], *image) > 0)) + multi_marker_bundles[i]->Update(marker_detector.markers, cam, bundlePoses[i]); + } + } + } + } - if(type==MAIN_MARKER) - rvizMarker->ns = "main_shapes"; - else - rvizMarker->ns = "basic_shapes"; + void InfoCallback (const sensor_msgs::msg::CameraInfo::SharedPtr cam_info) + { + RCLCPP_INFO(this->get_logger(),"this executed"); + if (!cam->getCamInfo_) + { + cam->SetCameraInfo(cam_info); + cam->getCamInfo_ = true; + info_sub_.reset(); + } + } - rvizMarker->type = visualization_msgs::Marker::CUBE; - rvizMarker->action = visualization_msgs::Marker::ADD; + // Given the pose of a marker, builds the appropriate ROS messages for later publishing + void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::msg::Image::ConstSharedPtr image_msg, geometry_msgs::msg::TransformStamped &CamToOutput, visualization_msgs::msg::Marker *rvizMarker, ar_track_alvar_msgs::msg::AlvarMarker *ar_pose_marker){ + double px,py,pz,qx,qy,qz,qw; + + px = p.translation[0]/100.0; + py = p.translation[1]/100.0; + pz = p.translation[2]/100.0; + qx = p.quaternion[1]; + qy = p.quaternion[2]; + qz = p.quaternion[3]; + qw = p.quaternion[0]; + + //Get the marker pose in the camera frame + tf2::Quaternion rotation (qx,qy,qz,qw); + tf2::Vector3 origin (px,py,pz); + tf2::Transform t (rotation, origin); //transform from cam to marker + + tf2::Vector3 markerOrigin (0, 0, 0); + tf2::Transform m (tf2::Quaternion::getIdentity (), markerOrigin); + tf2::Transform markerPose = t * m; + + //Publish the cam to marker transform for main marker in each bundle + if(type==MAIN_MARKER){ + std::string markerFrame = "ar_marker_"; + std::stringstream out; + out << id; + std::string id_string = out.str(); + markerFrame += id_string; + geometry_msgs::msg::TransformStamped camToMarker; + camToMarker.header.stamp = image_msg->header.stamp; + camToMarker.header.frame_id = image_msg->header.frame_id; + camToMarker.child_frame_id = markerFrame; + + geometry_msgs::msg::Vector3 trans; + trans.x = px; + trans.y = py; + trans.z = pz; + geometry_msgs::msg::Quaternion rot; + rot.x = qx; + rot.y = qy; + rot.z = qz; + rot.w = qw; + + camToMarker.transform.translation = trans; + camToMarker.transform.rotation = rot; + tf_broadcaster_->sendTransform(camToMarker); + } - //Determine a color and 
opacity, based on marker type - if(type==MAIN_MARKER){ - rvizMarker->color.r = 1.0f; - rvizMarker->color.g = 0.0f; - rvizMarker->color.b = 0.0f; - rvizMarker->color.a = 1.0; - } - else if(type==VISIBLE_MARKER){ - rvizMarker->color.r = 0.0f; - rvizMarker->color.g = 1.0f; - rvizMarker->color.b = 0.0f; - rvizMarker->color.a = 0.7; - } - else if(type==GHOST_MARKER){ - rvizMarker->color.r = 0.0f; - rvizMarker->color.g = 0.0f; - rvizMarker->color.b = 1.0f; - rvizMarker->color.a = 0.5; - } + //Create the rviz visualization message + rvizMarker->pose.position.x = markerPose.getOrigin().getX(); + rvizMarker->pose.position.y = markerPose.getOrigin().getY(); + rvizMarker->pose.position.z = markerPose.getOrigin().getZ(); + rvizMarker->pose.orientation.x = markerPose.getRotation().getX(); + rvizMarker->pose.orientation.y = markerPose.getRotation().getY(); + rvizMarker->pose.orientation.z = markerPose.getRotation().getZ(); + rvizMarker->pose.orientation.w = markerPose.getRotation().getW(); + rvizMarker->header.frame_id = image_msg->header.frame_id; + rvizMarker->header.stamp = image_msg->header.stamp; + rvizMarker->id = id; + + rvizMarker->scale.x = 1.0 * marker_size/100.0; + rvizMarker->scale.y = 1.0 * marker_size/100.0; + rvizMarker->scale.z = 0.2 * marker_size/100.0; + + if(type==MAIN_MARKER) + rvizMarker->ns = "main_shapes"; + else + rvizMarker->ns = "basic_shapes"; + + + rvizMarker->type = visualization_msgs::msg::Marker::CUBE; + rvizMarker->action = visualization_msgs::msg::Marker::ADD; + + //Determine a color and opacity, based on marker type + if(type==MAIN_MARKER){ + rvizMarker->color.r = 1.0f; + rvizMarker->color.g = 0.0f; + rvizMarker->color.b = 0.0f; + rvizMarker->color.a = 1.0; + } + else if(type==VISIBLE_MARKER){ + rvizMarker->color.r = 0.0f; + rvizMarker->color.g = 1.0f; + rvizMarker->color.b = 0.0f; + rvizMarker->color.a = 0.7; + } + else if(type==GHOST_MARKER){ + rvizMarker->color.r = 0.0f; + rvizMarker->color.g = 0.0f; + rvizMarker->color.b = 1.0f; + rvizMarker->color.a = 0.5; + } - rvizMarker->lifetime = ros::Duration (1.0); + rvizMarker->lifetime = rclcpp::Duration (1.0); - // Only publish the pose of the master tag in each bundle, since that's all we really care about aside from visualization - if(type==MAIN_MARKER){ - //Take the pose of the tag in the camera frame and convert to the output frame (usually torso_lift_link for the PR2) - tf::Transform tagPoseOutput = CamToOutput * markerPose; + // Only publish the pose of the master tag in each bundle, since that's all we really care about aside from visualization + if(type==MAIN_MARKER){ + //Take the pose of the tag in the camera frame and convert to the output frame (usually torso_lift_link for the PR2) + // tf2::Transform tagPoseOutput = CamToOutput * markerPose; - //Create the pose marker message - tf::poseTFToMsg (tagPoseOutput, ar_pose_marker->pose.pose); - ar_pose_marker->header.frame_id = output_frame; - ar_pose_marker->header.stamp = image_msg->header.stamp; - ar_pose_marker->id = id; - } - else - ar_pose_marker = NULL; -} + // //Create the pose marker message + // tf2::poseTFToMsg (tagPoseOutput, ar_pose_marker->pose.pose); + tf2::doTransform(ar_pose_marker->pose, ar_pose_marker->pose,CamToOutput); + ar_pose_marker->header.frame_id = output_frame; + ar_pose_marker->header.stamp = image_msg->header.stamp; + ar_pose_marker->id = id; + } + else + ar_pose_marker = NULL; + } -//Callback to handle getting video frames and processing them -void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg) -{ - //If we've already 
gotten the cam info, then go ahead - if(cam->getCamInfo_){ - try{ - //Get the transformation from the Camera to the output frame for this image capture - tf::StampedTransform CamToOutput; - try{ - tf_listener->waitForTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, ros::Duration(1.0)); - tf_listener->lookupTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, CamToOutput); - } - catch (tf::TransformException ex){ - ROS_ERROR("%s",ex.what()); + //Callback to handle getting video frames and processing them + void getCapCallback (const sensor_msgs::msg::Image::ConstSharedPtr & image_msg) + { + //If we've already gotten the cam info, then go ahead + if(cam->getCamInfo_) + { + try + { + //Get the transformation from the Camera to the output frame for this image capture + geometry_msgs::msg::TransformStamped CamToOutput; + std::string tf_error; + try{ + tf2::TimePoint tf2_time = tf2_ros::fromMsg(image_msg->header.stamp); + CamToOutput = tf2_->lookupTransform(output_frame, image_msg->header.frame_id,tf2_time,tf2_time - prev_stamp_); + } + catch (tf2::TransformException ex){ + RCLCPP_ERROR(rclcpp::get_logger("ArTrackAlvar"), "%s",ex.what()); + } + + visualization_msgs::msg::Marker rvizMarker; + ar_track_alvar_msgs::msg::AlvarMarker ar_pose_marker; + arPoseMarkers_.markers.clear (); + + + //Convert the image + cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); + + //Get the estimated pose of the main markers by using all the markers in each bundle + + // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives + // us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I + // do this conversion here -jbinney + cv::Mat ipl_image = cv_ptr_->image; + GetMultiMarkerPoses(&ipl_image); + + //Draw the observed markers that are visible and note which bundles have at least 1 marker seen + for(int i=0; isize(); i++) + { + int id = (*(marker_detector.markers))[i].GetId(); + + // Draw if id is valid + if(id >= 0){ + + //Mark the bundle that marker belongs to as "seen" + for(int j=0; jpublish (rvizMarker); + } } - - visualization_msgs::Marker rvizMarker; - ar_track_alvar_msgs::AlvarMarker ar_pose_marker; - arPoseMarkers_.markers.clear (); - - - //Convert the image - cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); - - //Get the estimated pose of the main markers by using all the markers in each bundle - - // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives - // us a cv::Mat. 
I'm too lazy to change to cv::Mat throughout right now, so I - // do this conversion here -jbinney - IplImage ipl_image = cv_ptr_->image; - GetMultiMarkerPoses(&ipl_image); - - //Draw the observed markers that are visible and note which bundles have at least 1 marker seen - for(int i=0; isize(); i++) - { - int id = (*(marker_detector.markers))[i].GetId(); - - // Draw if id is valid - if(id >= 0){ - - //Mark the bundle that marker belongs to as "seen" - for(int j=0; jencoding.c_str ()); + + //Draw the main markers, whether they are visible or not -- but only if at least 1 marker from their bundle is currently seen + for(int i=0; ipublish (rvizMarker); + arPoseMarkers_.markers.push_back (ar_pose_marker); + } + } + + //Publish the marker messages + arMarkerPub_->publish (arPoseMarkers_); + } + catch (cv_bridge::Exception& e) + { + RCLCPP_ERROR (this->get_logger(),"Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ()); + } + prev_stamp_ = tf2_ros::fromMsg(image_msg->header.stamp); } } -} +}; + int main(int argc, char *argv[]) { - ros::init (argc, argv, "marker_detect"); - ros::NodeHandle n; - - if(argc < 8){ - std::cout << std::endl; - cout << "Not enough arguments provided." << endl; - cout << "Usage: ./findMarkerBundles " << endl; - std::cout << std::endl; - return 0; - } - - // Get params from command line - marker_size = atof(argv[1]); - max_new_marker_error = atof(argv[2]); - max_track_error = atof(argv[3]); - cam_image_topic = argv[4]; - cam_info_topic = argv[5]; - output_frame = argv[6]; - int n_args_before_list = 7; - n_bundles = argc - n_args_before_list; - - marker_detector.SetMarkerSize(marker_size); - multi_marker_bundles = new MultiMarkerBundle*[n_bundles]; - bundlePoses = new Pose[n_bundles]; - master_id = new int[n_bundles]; - bundle_indices = new std::vector[n_bundles]; - bundles_seen = new bool[n_bundles]; - - // Load the marker bundle XML files - for(int i=0; i id_vector = loadHelper.getIndices(); - multi_marker_bundles[i] = new MultiMarkerBundle(id_vector); - multi_marker_bundles[i]->Load(argv[i + n_args_before_list], FILE_FORMAT_XML); - master_id[i] = multi_marker_bundles[i]->getMasterId(); - bundle_indices[i] = multi_marker_bundles[i]->getIndices(); - } - else{ - cout<<"Cannot load file "<< argv[i + n_args_before_list] << endl; - return 0; - } - } - - // Set up camera, listeners, and broadcasters - cam = new Camera(n, cam_info_topic); - tf_listener = new tf::TransformListener(n); - tf_broadcaster = new tf::TransformBroadcaster(); - arMarkerPub_ = n.advertise < ar_track_alvar_msgs::AlvarMarkers > ("ar_pose_marker", 0); - rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0); - - //Give tf a chance to catch up before the camera callback starts asking for transforms - ros::Duration(1.0).sleep(); - ros::spinOnce(); - - //Subscribe to topics and set up callbacks - ROS_INFO ("Subscribing to image topic"); - image_transport::ImageTransport it_(n); - cam_sub_ = it_.subscribe (cam_image_topic, 1, &getCapCallback); - - ros::spin(); + rclcpp::init(argc, argv); + rclcpp::spin(std::make_shared(argc,argv)); + rclcpp::shutdown(); return 0; } diff --git a/ar_track_alvar/nodes/IndividualMarkers.cpp b/ar_track_alvar/nodes/IndividualMarkers.cpp index a44a23f..67b81d9 100644 --- a/ar_track_alvar/nodes/IndividualMarkers.cpp +++ b/ar_track_alvar/nodes/IndividualMarkers.cpp @@ -39,575 +39,596 @@ #include "ar_track_alvar/MarkerDetector.h" #include "ar_track_alvar/Shared.h" #include -#include -#include -#include -#include -#include +#include 
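+// The tf2_ros headers in this include block (buffer, transform_listener,
+// create_timer_ros) replace the old tf API: transforms are looked up from a
+// tf2_ros::Buffer that a tf2_ros::TransformListener keeps filled.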
+#include +#include "tf2_ros/buffer.h" +#include "tf2_ros/create_timer_ros.h" +#include "tf2_ros/transform_listener.h" +#include +#include +#include +#include #include -#include -#include +#include "rclcpp/rclcpp.hpp" #include +#include -namespace gm=geometry_msgs; -namespace ata=ar_track_alvar; - -typedef pcl::PointXYZRGB ARPoint; -typedef pcl::PointCloud ARCloud; using namespace alvar; using namespace std; using boost::make_shared; - -bool init=true; -Camera *cam; -cv_bridge::CvImagePtr cv_ptr_; -image_transport::Subscriber cam_sub_; -ros::Subscriber cloud_sub_; -ros::Publisher arMarkerPub_; -ros::Publisher rvizMarkerPub_; -ros::Publisher rvizMarkerPub2_; -ar_track_alvar_msgs::AlvarMarkers arPoseMarkers_; -visualization_msgs::Marker rvizMarker_; -tf::TransformListener *tf_listener; -tf::TransformBroadcaster *tf_broadcaster; -MarkerDetector marker_detector; - -bool enableSwitched = false; -bool enabled = true; -bool output_frame_from_msg; -double max_frequency; -double marker_size; -double max_new_marker_error; -double max_track_error; -std::string cam_image_topic; -std::string cam_info_topic; -std::string output_frame; -int marker_resolution = 5; // default marker resolution -int marker_margin = 2; // default marker margin - - -//Debugging utility function -void draw3dPoints(ARCloud::Ptr cloud, string frame, int color, int id, double rad) -{ - visualization_msgs::Marker rvizMarker; - - rvizMarker.header.frame_id = frame; - rvizMarker.header.stamp = ros::Time::now(); - rvizMarker.id = id; - rvizMarker.ns = "3dpts"; - - rvizMarker.scale.x = rad; - rvizMarker.scale.y = rad; - rvizMarker.scale.z = rad; - - rvizMarker.type = visualization_msgs::Marker::SPHERE_LIST; - rvizMarker.action = visualization_msgs::Marker::ADD; - - if(color==1){ - rvizMarker.color.r = 0.0f; - rvizMarker.color.g = 1.0f; - rvizMarker.color.b = 1.0f; - rvizMarker.color.a = 1.0; - } - if(color==2){ - rvizMarker.color.r = 1.0f; - rvizMarker.color.g = 0.0f; - rvizMarker.color.b = 1.0f; - rvizMarker.color.a = 1.0; - } - if(color==3){ - rvizMarker.color.r = 1.0f; - rvizMarker.color.g = 1.0f; - rvizMarker.color.b = 0.0f; - rvizMarker.color.a = 1.0; - } - - gm::Point p; - for(int i=0; ipoints.size(); i++){ - p.x = cloud->points[i].x; - p.y = cloud->points[i].y; - p.z = cloud->points[i].z; - rvizMarker.points.push_back(p); - } - - rvizMarker.lifetime = ros::Duration (1.0); - rvizMarkerPub2_.publish (rvizMarker); -} +namespace gm=geometry_msgs; +namespace ata=ar_track_alvar; +typedef pcl::PointXYZRGB ARPoint; +typedef pcl::PointCloud ARCloud; -void drawArrow(gm::Point start, tf::Matrix3x3 mat, string frame, int color, int id) +class IndividualMarkers : public rclcpp::Node { - visualization_msgs::Marker rvizMarker; - - rvizMarker.header.frame_id = frame; - rvizMarker.header.stamp = ros::Time::now(); - rvizMarker.id = id; - rvizMarker.ns = "arrow"; - - rvizMarker.scale.x = 0.01; - rvizMarker.scale.y = 0.01; - rvizMarker.scale.z = 0.1; - - rvizMarker.type = visualization_msgs::Marker::ARROW; - rvizMarker.action = visualization_msgs::Marker::ADD; - - for(int i=0; i<3; i++){ - rvizMarker.points.clear(); - rvizMarker.points.push_back(start); - gm::Point end; - end.x = start.x + mat[0][i]; - end.y = start.y + mat[1][i]; - end.z = start.z + mat[2][i]; - rvizMarker.points.push_back(end); - rvizMarker.id += 10*i; - rvizMarker.lifetime = ros::Duration (1.0); - - if(color==1){ - rvizMarker.color.r = 1.0f; - rvizMarker.color.g = 0.0f; - rvizMarker.color.b = 0.0f; - rvizMarker.color.a = 1.0; - } - if(color==2){ - rvizMarker.color.r = 0.0f; - 
rvizMarker.color.g = 1.0f; - rvizMarker.color.b = 0.0f; - rvizMarker.color.a = 1.0; - } - if(color==3){ - rvizMarker.color.r = 0.0f; - rvizMarker.color.g = 0.0f; - rvizMarker.color.b = 1.0f; - rvizMarker.color.a = 1.0; - } - color += 1; - - rvizMarkerPub2_.publish (rvizMarker); - } -} - - -int PlaneFitPoseImprovement(int id, const ARCloud &corners_3D, ARCloud::Ptr selected_points, const ARCloud &cloud, Pose &p){ - - ata::PlaneFitResult res = ata::fitPlane(selected_points); - gm::PoseStamped pose; - pose.header.stamp = pcl_conversions::fromPCL(cloud.header).stamp; - pose.header.frame_id = cloud.header.frame_id; - pose.pose.position = ata::centroid(*res.inliers); - - draw3dPoints(selected_points, cloud.header.frame_id, 1, id, 0.005); - - //Get 2 points that point forward in marker x direction - int i1,i2; - if(isnan(corners_3D[0].x) || isnan(corners_3D[0].y) || isnan(corners_3D[0].z) || - isnan(corners_3D[3].x) || isnan(corners_3D[3].y) || isnan(corners_3D[3].z)) + private: + bool init=true; + Camera *cam; + cv_bridge::CvImagePtr cv_ptr_; + + rclcpp::Subscription::SharedPtr cloud_sub_; + rclcpp::Publisher::SharedPtr arMarkerPub_; + rclcpp::Publisher::SharedPtr rvizMarkerPub_; + rclcpp::Publisher::SharedPtr rvizMarkerPub2_; + rclcpp::Subscription::SharedPtr info_sub_; + + + tf2::TimePoint prev_stamp_; + std::shared_ptr tf2_; + std::shared_ptr tf_listener_; + std::shared_ptr tf_broadcaster_; + + + double parameters_set = false; + rclcpp::TimerBase::SharedPtr timer_; + + ar_track_alvar_msgs::msg::AlvarMarkers arPoseMarkers_; + visualization_msgs::msg::Marker rvizMarker_; + + + MarkerDetector marker_detector; + bool enableSwitched = false; + bool output_frame_from_msg; + double max_frequency; + double marker_size; + double max_new_marker_error; + double max_track_error; + std::string cam_image_topic; + std::string cam_info_topic; + std::string output_frame; + int marker_resolution = 5; // default marker resolution + int marker_margin = 2; // default marker margin + + + + + public: + IndividualMarkers(int argc, char* argv[]):Node("marker_detect") + { + + rclcpp::Clock::SharedPtr clock = this->get_clock(); + tf2_ = std::make_shared(clock); + tf_listener_ = std::make_shared(*tf2_); + tf_broadcaster_ = std::make_shared(*this); + prev_stamp_ = tf2::get_now(); + + this->declare_parameter("marker_size", 10.0); + this->declare_parameter("max_new_marker_error", 0.08); + this->declare_parameter("max_track_error", 0.2); + this->declare_parameter("max_frequency", 8.0); + this->declare_parameter("marker_resolution", 5); + this->declare_parameter("marker_margin", 2); + this->declare_parameter("output_frame_from_msg", false); + this->declare_parameter("output_frame", ""); + + // Camera input topics. Use remapping to map to your camera topics. 
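+        //   e.g. (topic names here are placeholders, not part of this patch):
+        //   ros2 run ar_track_alvar individualMarkers --ros-args \
+        //     -r camera_image:=/camera/depth_registered/points \
+        //     -r camera_info:=/camera/rgb/camera_info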
+ cam_image_topic = "camera_image"; + cam_info_topic = "camera_info"; + + marker_detector.SetMarkerSize(marker_size, marker_resolution, marker_margin); + cam = new Camera(); + + + arMarkerPub_ = this->create_publisher ("ar_pose_marker", 0); + rvizMarkerPub_ = this->create_publisher ("visualization_marker", 0); + rvizMarkerPub2_ = this->create_publisher ("ARmarker_points", 0); + + + //Give tf a chance to catch up before the camera callback starts asking for transforms + // It will also reconfigure parameters for the first time, setting the default values + //TODO: come back to this, there's probably a better way to do this + rclcpp::Rate loop_rate(100); + loop_rate.sleep(); + + RCLCPP_INFO(this->get_logger(), "Subscribing to image topic"); + + // subscribe to the point cloud + cloud_sub_ = this->create_subscription(cam_image_topic, 1, + std::bind(&IndividualMarkers::getPointCloudCallback, this, std::placeholders::_1)); + + // subscribe to the info topic + RCLCPP_INFO(this->get_logger(),"Subscribing to info topic"); + info_sub_ = this->create_subscription(cam_info_topic, 1, std::bind(&IndividualMarkers::InfoCallback, this, std::placeholders::_1)); + + // create parameter callback + timer_ = this->create_wall_timer(1000ms, std::bind(&IndividualMarkers::set_parameters, this)); + } + + void InfoCallback (const sensor_msgs::msg::CameraInfo::SharedPtr cam_info) { - if(isnan(corners_3D[1].x) || isnan(corners_3D[1].y) || isnan(corners_3D[1].z) || - isnan(corners_3D[2].x) || isnan(corners_3D[2].y) || isnan(corners_3D[2].z)) - { - return -1; - } - else{ - i1 = 1; - i2 = 2; + if (!cam->getCamInfo_) + { + cam->SetCameraInfo(cam_info); + cam->getCamInfo_ = true; + info_sub_.reset(); } } - else{ - i1 = 0; - i2 = 3; - } - - //Get 2 points the point forward in marker y direction - int i3,i4; - if(isnan(corners_3D[0].x) || isnan(corners_3D[0].y) || isnan(corners_3D[0].z) || - isnan(corners_3D[1].x) || isnan(corners_3D[1].y) || isnan(corners_3D[1].z)) + + void set_parameters() { - if(isnan(corners_3D[3].x) || isnan(corners_3D[3].y) || isnan(corners_3D[3].z) || - isnan(corners_3D[2].x) || isnan(corners_3D[2].y) || isnan(corners_3D[2].z)) - { - return -1; - } - else{ - i3 = 2; - i4 = 3; - } + if (!output_frame_from_msg && output_frame.empty()) + { + RCLCPP_ERROR(this->get_logger(), "Param 'output_frame' has to be set if the output frame is not " + "derived from the point cloud message."); + exit(0); + } + else + { + parameters_set=true; + } + + this->get_parameter("marker_size", marker_size); + this->get_parameter("max_new_marker_error", max_new_marker_error); + this->get_parameter("max_track_error", max_track_error); + this->get_parameter("max_frequency", max_frequency); + this->get_parameter("marker_resolution", marker_resolution); + this->get_parameter("marker_margin", marker_margin); + this->get_parameter("output_frame_from_msg", output_frame_from_msg); + this->get_parameter("output_frame", output_frame); } - else{ - i3 = 1; - i4 = 0; - } - - ARCloud::Ptr orient_points(new ARCloud()); - orient_points->points.push_back(corners_3D[i1]); - draw3dPoints(orient_points, cloud.header.frame_id, 3, id+1000, 0.008); - - orient_points->clear(); - orient_points->points.push_back(corners_3D[i2]); - draw3dPoints(orient_points, cloud.header.frame_id, 2, id+2000, 0.008); - - int succ; - succ = ata::extractOrientation(res.coeffs, corners_3D[i1], corners_3D[i2], corners_3D[i3], corners_3D[i4], pose.pose.orientation); - if(succ < 0) return -1; - tf::Matrix3x3 mat; - succ = ata::extractFrame(res.coeffs, corners_3D[i1], 
corners_3D[i2], corners_3D[i3], corners_3D[i4], mat); - if(succ < 0) return -1; + //Debugging utility function + void draw3dPoints(ARCloud::Ptr cloud, string frame, int color, int id, double rad) + { + visualization_msgs::msg::Marker rvizMarker; - drawArrow(pose.pose.position, mat, cloud.header.frame_id, 1, id); + rvizMarker.header.frame_id = frame; + rvizMarker.header.stamp = this->get_clock()->now(); + rvizMarker.id = id; + rvizMarker.ns = "3dpts"; - p.translation[0] = pose.pose.position.x * 100.0; - p.translation[1] = pose.pose.position.y * 100.0; - p.translation[2] = pose.pose.position.z * 100.0; - p.quaternion[1] = pose.pose.orientation.x; - p.quaternion[2] = pose.pose.orientation.y; - p.quaternion[3] = pose.pose.orientation.z; - p.quaternion[0] = pose.pose.orientation.w; + rvizMarker.scale.x = rad; + rvizMarker.scale.y = rad; + rvizMarker.scale.z = rad; - return 0; -} + rvizMarker.type = visualization_msgs::msg::Marker::SPHERE_LIST; + rvizMarker.action = visualization_msgs::msg::Marker::ADD; + if(color==1){ + rvizMarker.color.r = 0.0f; + rvizMarker.color.g = 1.0f; + rvizMarker.color.b = 1.0f; + rvizMarker.color.a = 1.0; + } + if(color==2){ + rvizMarker.color.r = 1.0f; + rvizMarker.color.g = 0.0f; + rvizMarker.color.b = 1.0f; + rvizMarker.color.a = 1.0; + } + if(color==3){ + rvizMarker.color.r = 1.0f; + rvizMarker.color.g = 1.0f; + rvizMarker.color.b = 0.0f; + rvizMarker.color.a = 1.0; + } -void GetMarkerPoses(IplImage *image, ARCloud &cloud) { + gm::msg::Point p; + for(int i=0; ipoints.size(); i++){ + p.x = cloud->points[i].x; + p.y = cloud->points[i].y; + p.z = cloud->points[i].z; + rvizMarker.points.push_back(p); + } - //Detect and track the markers - if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, - max_track_error, CVSEQ, true)) - { - ROS_DEBUG_STREAM("--------------------------"); - for (size_t i=0; isize(); i++) - { - vector > pixels; - Marker *m = &((*marker_detector.markers)[i]); - int id = m->GetId(); - ROS_DEBUG_STREAM("******* ID: " << id); - - int resol = m->GetRes(); - int ori = m->ros_orientation; - - PointDouble pt1, pt2, pt3, pt4; - pt4 = m->ros_marker_points_img[0]; - pt3 = m->ros_marker_points_img[resol-1]; - pt1 = m->ros_marker_points_img[(resol*resol)-resol]; - pt2 = m->ros_marker_points_img[(resol*resol)-1]; - - m->ros_corners_3D[0] = cloud(pt1.x, pt1.y); - m->ros_corners_3D[1] = cloud(pt2.x, pt2.y); - m->ros_corners_3D[2] = cloud(pt3.x, pt3.y); - m->ros_corners_3D[3] = cloud(pt4.x, pt4.y); - - if(ori >= 0 && ori < 4){ - if(ori != 0){ - std::rotate(m->ros_corners_3D.begin(), m->ros_corners_3D.begin() + ori, m->ros_corners_3D.end()); - } - } - else - ROS_ERROR("FindMarkerBundles: Bad Orientation: %i for ID: %i", ori, id); - - //Get the 3D marker points - BOOST_FOREACH (const PointDouble& p, m->ros_marker_points_img) - pixels.push_back(cv::Point(p.x, p.y)); - ARCloud::Ptr selected_points = ata::filterCloud(cloud, pixels); - - //Use the kinect data to find a plane and pose for the marker - int ret = PlaneFitPoseImprovement(i, m->ros_corners_3D, selected_points, cloud, m->pose); - } + rvizMarker.lifetime = rclcpp::Duration (1.0); + rvizMarkerPub2_->publish (rvizMarker); } -} - -void getPointCloudCallback (const sensor_msgs::PointCloud2ConstPtr &msg) -{ - sensor_msgs::ImagePtr image_msg(new sensor_msgs::Image); - - // If desired, use the frame in the message's header. 
- if (output_frame_from_msg) - output_frame = msg->header.frame_id; - - //If we've already gotten the cam info, then go ahead - if(cam->getCamInfo_){ - //Convert cloud to PCL - ARCloud cloud; - pcl::fromROSMsg(*msg, cloud); - - //Get an OpenCV image from the cloud - pcl::toROSMsg (*msg, *image_msg); - image_msg->header.stamp = msg->header.stamp; - image_msg->header.frame_id = msg->header.frame_id; - - - //Convert the image - cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); - - //Get the estimated pose of the main markers by using all the markers in each bundle - - // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives - // us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I - // do this conversion here -jbinney - IplImage ipl_image = cv_ptr_->image; - - - //Use the kinect to improve the pose - Pose ret_pose; - GetMarkerPoses(&ipl_image, cloud); - - tf::StampedTransform CamToOutput; - if (image_msg->header.frame_id == output_frame) { - CamToOutput.setIdentity(); - } else { - try { - tf_listener->waitForTransform(output_frame, image_msg->header.frame_id, - image_msg->header.stamp, ros::Duration(1.0)); - tf_listener->lookupTransform(output_frame, image_msg->header.frame_id, - image_msg->header.stamp, CamToOutput); - } catch (tf::TransformException ex) { - ROS_ERROR("%s",ex.what()); + void drawArrow(gm::msg::Point start, tf2::Matrix3x3 mat, string frame, int color, int id) + { + visualization_msgs::msg::Marker rvizMarker; + + rvizMarker.header.frame_id = frame; + rvizMarker.header.stamp = this->get_clock()->now(); + rvizMarker.id = id; + rvizMarker.ns = "arrow"; + + rvizMarker.scale.x = 0.01; + rvizMarker.scale.y = 0.01; + rvizMarker.scale.z = 0.1; + + rvizMarker.type = visualization_msgs::msg::Marker::ARROW; + rvizMarker.action = visualization_msgs::msg::Marker::ADD; + + for(int i=0; i<3; i++){ + rvizMarker.points.clear(); + rvizMarker.points.push_back(start); + gm::msg::Point end; + end.x = start.x + mat[0][i]; + end.y = start.y + mat[1][i]; + end.z = start.z + mat[2][i]; + rvizMarker.points.push_back(end); + rvizMarker.id += 10*i; + rvizMarker.lifetime = rclcpp::Duration (1.0); + + if(color==1){ + rvizMarker.color.r = 1.0f; + rvizMarker.color.g = 0.0f; + rvizMarker.color.b = 0.0f; + rvizMarker.color.a = 1.0; + } + if(color==2){ + rvizMarker.color.r = 0.0f; + rvizMarker.color.g = 1.0f; + rvizMarker.color.b = 0.0f; + rvizMarker.color.a = 1.0; + } + if(color==3){ + rvizMarker.color.r = 0.0f; + rvizMarker.color.g = 0.0f; + rvizMarker.color.b = 1.0f; + rvizMarker.color.a = 1.0; + } + color += 1; + + rvizMarkerPub2_->publish (rvizMarker); + } } - } - try{ - - - arPoseMarkers_.markers.clear (); - for (size_t i=0; isize(); i++) - { - //Get the pose relative to the camera - int id = (*(marker_detector.markers))[i].GetId(); - Pose p = (*(marker_detector.markers))[i].pose; - - double px = p.translation[0]/100.0; - double py = p.translation[1]/100.0; - double pz = p.translation[2]/100.0; - double qx = p.quaternion[1]; - double qy = p.quaternion[2]; - double qz = p.quaternion[3]; - double qw = p.quaternion[0]; - - tf::Quaternion rotation (qx,qy,qz,qw); - tf::Vector3 origin (px,py,pz); - tf::Transform t (rotation, origin); - tf::Vector3 markerOrigin (0, 0, 0); - tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin); - tf::Transform markerPose = t * m; // marker pose in the camera frame - - //Publish the transform from the camera to the marker - std::string markerFrame = "ar_marker_"; - std::stringstream out; - out 
<< id; - std::string id_string = out.str(); - markerFrame += id_string; - tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame.c_str()); - tf_broadcaster->sendTransform(camToMarker); - - //Create the rviz visualization messages - tf::poseTFToMsg (markerPose, rvizMarker_.pose); - rvizMarker_.header.frame_id = image_msg->header.frame_id; - rvizMarker_.header.stamp = image_msg->header.stamp; - rvizMarker_.id = id; - - rvizMarker_.scale.x = 1.0 * marker_size/100.0; - rvizMarker_.scale.y = 1.0 * marker_size/100.0; - rvizMarker_.scale.z = 0.2 * marker_size/100.0; - rvizMarker_.ns = "basic_shapes"; - rvizMarker_.type = visualization_msgs::Marker::CUBE; - rvizMarker_.action = visualization_msgs::Marker::ADD; - switch (id) - { - case 0: - rvizMarker_.color.r = 0.0f; - rvizMarker_.color.g = 0.0f; - rvizMarker_.color.b = 1.0f; - rvizMarker_.color.a = 1.0; - break; - case 1: - rvizMarker_.color.r = 1.0f; - rvizMarker_.color.g = 0.0f; - rvizMarker_.color.b = 0.0f; - rvizMarker_.color.a = 1.0; - break; - case 2: - rvizMarker_.color.r = 0.0f; - rvizMarker_.color.g = 1.0f; - rvizMarker_.color.b = 0.0f; - rvizMarker_.color.a = 1.0; - break; - case 3: - rvizMarker_.color.r = 0.0f; - rvizMarker_.color.g = 0.5f; - rvizMarker_.color.b = 0.5f; - rvizMarker_.color.a = 1.0; - break; - case 4: - rvizMarker_.color.r = 0.5f; - rvizMarker_.color.g = 0.5f; - rvizMarker_.color.b = 0.0; - rvizMarker_.color.a = 1.0; - break; - default: - rvizMarker_.color.r = 0.5f; - rvizMarker_.color.g = 0.0f; - rvizMarker_.color.b = 0.5f; - rvizMarker_.color.a = 1.0; - break; - } - rvizMarker_.lifetime = ros::Duration (1.0); - rvizMarkerPub_.publish (rvizMarker_); - - //Get the pose of the tag in the camera frame, then the output frame (usually torso) - tf::Transform tagPoseOutput = CamToOutput * markerPose; - - //Create the pose marker messages - ar_track_alvar_msgs::AlvarMarker ar_pose_marker; - tf::poseTFToMsg (tagPoseOutput, ar_pose_marker.pose.pose); - ar_pose_marker.header.frame_id = output_frame; - ar_pose_marker.header.stamp = image_msg->header.stamp; - ar_pose_marker.id = id; - arPoseMarkers_.markers.push_back (ar_pose_marker); - } - arPoseMarkers_.header.stamp = image_msg->header.stamp; - arMarkerPub_.publish (arPoseMarkers_); - } - catch (cv_bridge::Exception& e){ - ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ()); - } - } -} - -void configCallback(ar_track_alvar::ParamsConfig &config, uint32_t level) -{ - ROS_INFO("AR tracker reconfigured: %s %.2f %.2f %.2f %.2f", config.enabled ? 
"ENABLED" : "DISABLED", - config.max_frequency, config.marker_size, config.max_new_marker_error, config.max_track_error); + int PlaneFitPoseImprovement(int id, const ARCloud& corners_3D, + ARCloud::Ptr selected_points, const ARCloud& cloud, + Pose& p) + { + ata::PlaneFitResult res = ata::fitPlane(selected_points); + gm::msg::PoseStamped pose; + pose.header.stamp = pcl_conversions::fromPCL(cloud.header).stamp; + pose.header.frame_id = cloud.header.frame_id; + pose.pose.position = ata::centroid(*res.inliers); + + draw3dPoints(selected_points, cloud.header.frame_id, 1, id, 0.005); + + // Get 2 points that point forward in marker x direction + int i1, i2; + if (isnan(corners_3D[0].x) || isnan(corners_3D[0].y) || + isnan(corners_3D[0].z) || isnan(corners_3D[3].x) || + isnan(corners_3D[3].y) || isnan(corners_3D[3].z)) + { + if (isnan(corners_3D[1].x) || isnan(corners_3D[1].y) || + isnan(corners_3D[1].z) || isnan(corners_3D[2].x) || + isnan(corners_3D[2].y) || isnan(corners_3D[2].z)) + { + return -1; + } + else + { + i1 = 1; + i2 = 2; + } + } + else + { + i1 = 0; + i2 = 3; + } - enableSwitched = enabled != config.enabled; + // Get 2 points the point forward in marker y direction + int i3, i4; + if (isnan(corners_3D[0].x) || isnan(corners_3D[0].y) || + isnan(corners_3D[0].z) || isnan(corners_3D[1].x) || + isnan(corners_3D[1].y) || isnan(corners_3D[1].z)) + { + if (isnan(corners_3D[3].x) || isnan(corners_3D[3].y) || + isnan(corners_3D[3].z) || isnan(corners_3D[2].x) || + isnan(corners_3D[2].y) || isnan(corners_3D[2].z)) + { + return -1; + } + else + { + i3 = 2; + i4 = 3; + } + } + else + { + i3 = 1; + i4 = 0; + } - enabled = config.enabled; - max_frequency = config.max_frequency; - marker_size = config.marker_size; - max_new_marker_error = config.max_new_marker_error; - max_track_error = config.max_track_error; -} + ARCloud::Ptr orient_points(new ARCloud()); + orient_points->points.push_back(corners_3D[i1]); + draw3dPoints(orient_points, cloud.header.frame_id, 3, id + 1000, 0.008); + + orient_points->clear(); + orient_points->points.push_back(corners_3D[i2]); + draw3dPoints(orient_points, cloud.header.frame_id, 2, id + 2000, 0.008); + + int succ; + succ = ata::extractOrientation(res.coeffs, corners_3D[i1], corners_3D[i2], + corners_3D[i3], corners_3D[i4], + pose.pose.orientation); + if (succ < 0) + return -1; + + tf2::Matrix3x3 mat; + succ = ata::extractFrame(res.coeffs, corners_3D[i1], corners_3D[i2], + corners_3D[i3], corners_3D[i4], mat); + if (succ < 0) + return -1; + + drawArrow(pose.pose.position, mat, cloud.header.frame_id, 1, id); + + p.translation[0] = pose.pose.position.x * 100.0; + p.translation[1] = pose.pose.position.y * 100.0; + p.translation[2] = pose.pose.position.z * 100.0; + p.quaternion[1] = pose.pose.orientation.x; + p.quaternion[2] = pose.pose.orientation.y; + p.quaternion[3] = pose.pose.orientation.z; + p.quaternion[0] = pose.pose.orientation.w; -int main(int argc, char *argv[]) -{ - ros::init (argc, argv, "marker_detect"); - ros::NodeHandle n, pn("~"); - - if(argc > 1) { - ROS_WARN("Command line arguments are deprecated. Consider using ROS parameters and remappings."); - - if(argc < 7){ - std::cout << std::endl; - cout << "Not enough arguments provided." 
<< endl; - cout << "Usage: ./individualMarkers " - << " [ ]"; - std::cout << std::endl; return 0; } - // Get params from command line - marker_size = atof(argv[1]); - max_new_marker_error = atof(argv[2]); - max_track_error = atof(argv[3]); - cam_image_topic = argv[4]; - cam_info_topic = argv[5]; - output_frame = argv[6]; - - if (argc > 7) { - max_frequency = atof(argv[7]); - pn.setParam("max_frequency", max_frequency); - } - if (argc > 8) - marker_resolution = atoi(argv[8]); - if (argc > 9) - marker_margin = atoi(argv[9]); - - } else { - // Get params from ros param server. - pn.param("marker_size", marker_size, 10.0); - pn.param("max_new_marker_error", max_new_marker_error, 0.08); - pn.param("max_track_error", max_track_error, 0.2); - pn.param("max_frequency", max_frequency, 8.0); - pn.setParam("max_frequency", max_frequency); // in case it was not set. - pn.param("marker_resolution", marker_resolution, 5); - pn.param("marker_margin", marker_margin, 2); - pn.param("output_frame_from_msg", output_frame_from_msg, false); - - if (!output_frame_from_msg && !pn.getParam("output_frame", output_frame)) { - ROS_ERROR("Param 'output_frame' has to be set if the output frame is not " - "derived from the point cloud message."); - exit(EXIT_FAILURE); - } - - // Camera input topics. Use remapping to map to your camera topics. - cam_image_topic = "camera_image"; - cam_info_topic = "camera_info"; - } - - // Set dynamically configurable parameters so they don't get replaced by default values - pn.setParam("marker_size", marker_size); - pn.setParam("max_new_marker_error", max_new_marker_error); - pn.setParam("max_track_error", max_track_error); - - marker_detector.SetMarkerSize(marker_size, marker_resolution, marker_margin); - - cam = new Camera(n, cam_info_topic); - - if (!output_frame_from_msg) { - // TF listener is only required when output frame != camera frame. 
-    tf_listener = new tf::TransformListener(n);
-  }
-  tf_broadcaster = new tf::TransformBroadcaster();
-  arMarkerPub_ = n.advertise < ar_track_alvar_msgs::AlvarMarkers > ("ar_pose_marker", 0);
-  rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0);
-  rvizMarkerPub2_ = n.advertise < visualization_msgs::Marker > ("ARmarker_points", 0);
-
-  // Prepare dynamic reconfiguration
-  dynamic_reconfigure::Server < ar_track_alvar::ParamsConfig > server;
-  dynamic_reconfigure::Server<ar_track_alvar::ParamsConfig>::CallbackType f;
-
-  f = boost::bind(&configCallback, _1, _2);
-  server.setCallback(f);
-
-  //Give tf a chance to catch up before the camera callback starts asking for transforms
-  // It will also reconfigure parameters for the first time, setting the default values
-  ros::Duration(1.0).sleep();
-  ros::spinOnce();
-
-  if (enabled == true)
-  {
-    // This always happens, as enable is true by default
-    ROS_INFO("Subscribing to image topic");
-    cloud_sub_ = n.subscribe(cam_image_topic, 1, &getPointCloudCallback);
-  }
-
-  // Run at the configured rate, discarding pointcloud msgs if necessary
-  ros::Rate rate(max_frequency);
-
-  while (ros::ok())
-  {
-    ros::spinOnce();
-    rate.sleep();
-
-    if (std::abs((rate.expectedCycleTime() - ros::Duration(1.0 / max_frequency)).toSec()) > 0.001)
-    {
-      // Change rate dynamically; if must be above 0, as 0 will provoke a segfault on next spinOnce
-      ROS_DEBUG("Changing frequency from %.2f to %.2f", 1.0 / rate.expectedCycleTime().toSec(), max_frequency);
-      rate = ros::Rate(max_frequency);
-    }
-
-    if (enableSwitched == true)
-    {
-      // Enable/disable switch: subscribe/unsubscribe to make use of pointcloud processing nodelet
-      // lazy publishing policy; in CPU-scarce computer as TurtleBot's laptop this is a huge saving
-      if (enabled == false)
-        cloud_sub_.shutdown();
-      else
-        cloud_sub_ = n.subscribe(cam_image_topic, 1, &getPointCloudCallback);
-
-      enableSwitched = false;
-    }
-  }

+    void GetMarkerPoses(cv::Mat *image, ARCloud &cloud)
+    {
+      //Detect and track the markers
+      if (marker_detector.Detect(*image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true))
+      {
+        RCLCPP_DEBUG(this->get_logger(), "-----------------------------------");
+        for (size_t i=0; i<marker_detector.markers->size(); i++)
+        {
+          vector<cv::Point, Eigen::aligned_allocator<cv::Point> > pixels;
+          Marker *m = &((*marker_detector.markers)[i]);
+          int id = m->GetId();
+
+          RCLCPP_DEBUG(this->get_logger(), "******* ID: %d", id);
+
+          int resol = m->GetRes();
+          int ori = m->ros_orientation;
+
+          PointDouble pt1, pt2, pt3, pt4;
+          pt4 = m->ros_marker_points_img[0];
+          pt3 = m->ros_marker_points_img[resol-1];
+          pt1 = m->ros_marker_points_img[(resol*resol)-resol];
+          pt2 = m->ros_marker_points_img[(resol*resol)-1];
+
+          m->ros_corners_3D[0] = cloud(pt1.x, pt1.y);
+          m->ros_corners_3D[1] = cloud(pt2.x, pt2.y);
+          m->ros_corners_3D[2] = cloud(pt3.x, pt3.y);
+          m->ros_corners_3D[3] = cloud(pt4.x, pt4.y);
+
+          if(ori >= 0 && ori < 4)
+          {
+            if(ori != 0)
+            {
+              std::rotate(m->ros_corners_3D.begin(), m->ros_corners_3D.begin() + ori, m->ros_corners_3D.end());
+            }
+          }
+          else
+          {
+            RCLCPP_ERROR(this->get_logger(), "FindMarkerBundles: Bad Orientation: %i for ID: %i", ori, id);
+          }
+
+          //Get the 3D marker points (for every marker, not just bad orientations)
+          BOOST_FOREACH (const PointDouble& p, m->ros_marker_points_img)
+          {
+            pixels.push_back(cv::Point(p.x, p.y));
+          }
+          ARCloud::Ptr selected_points = ata::filterCloud(cloud, pixels);
+
+          //Use the kinect data to find a plane and pose for the marker
+          int ret = PlaneFitPoseImprovement(i, m->ros_corners_3D, selected_points, cloud, m->pose);
+        }
+      }
+    }
+
+    void getPointCloudCallback
(const sensor_msgs::msg::PointCloud2::SharedPtr msg) + { + sensor_msgs::msg::Image image_msg; + + // If desired, use the frame in the message's header. + if (output_frame_from_msg) + output_frame = msg->header.frame_id; + + //If we've already gotten the cam info, then go ahead + if(cam->getCamInfo_){ + //Convert cloud to PCL + ARCloud cloud; + pcl::fromROSMsg(*msg, cloud); + //Get an OpenCV image from the cloud + pcl::toROSMsg (*msg, image_msg); + + + image_msg.header.stamp = msg->header.stamp; + image_msg.header.frame_id = msg->header.frame_id; + + + //Convert the image + cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); + + //Get the estimated pose of the main markers by using all the markers in each bundle + + // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives + // us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I + // do this conversion here -jbinney + cv::Mat ipl_image = cv_ptr_->image; + + + //Use the kinect to improve the pose + Pose ret_pose; + GetMarkerPoses(&ipl_image, cloud); + std::string tf_error; + geometry_msgs::msg::TransformStamped CamToOutput; + try + { + tf2::TimePoint tf2_time = tf2_ros::fromMsg(image_msg.header.stamp); + CamToOutput = tf2_->lookupTransform(output_frame, image_msg.header.frame_id,tf2_time,tf2_time - prev_stamp_); + + } catch (tf2::TransformException ex) { + RCLCPP_ERROR(this->get_logger(), "%s",ex.what()); + } + + try + { + arPoseMarkers_.markers.clear (); + for (size_t i=0; isize(); i++) + { + //Get the pose relative to the camera + int id = (*(marker_detector.markers))[i].GetId(); + Pose p = (*(marker_detector.markers))[i].pose; + + double px = p.translation[0]/100.0; + double py = p.translation[1]/100.0; + double pz = p.translation[2]/100.0; + double qx = p.quaternion[1]; + double qy = p.quaternion[2]; + double qz = p.quaternion[3]; + double qw = p.quaternion[0]; + + tf2::Quaternion rotation (qx,qy,qz,qw); + tf2::Vector3 origin (px,py,pz); + tf2::Transform t (rotation, origin); + tf2::Vector3 markerOrigin (0, 0, 0); + tf2::Transform m (tf2::Quaternion::getIdentity (), markerOrigin); + tf2::Transform markerPose = t * m; // marker pose in the camera frame + + //Publish the transform from the camera to the marker + std::string markerFrame = "ar_marker_"; + std::stringstream out; + out << id; + std::string id_string = out.str(); + markerFrame += id_string; + geometry_msgs::msg::TransformStamped camToMarker; + camToMarker.header.stamp = image_msg.header.stamp; + camToMarker.header.frame_id = image_msg.header.frame_id; + camToMarker.child_frame_id = markerFrame; + + geometry_msgs::msg::Vector3 trans; + trans.x = px; + trans.y = py; + trans.z = pz; + geometry_msgs::msg::Quaternion rot; + rot.x = qx; + rot.y = qy; + rot.z = qz; + rot.w = qw; + + + camToMarker.transform.translation = trans; + camToMarker.transform.rotation = rot; + tf_broadcaster_->sendTransform(camToMarker); + + + //Create the rviz visualization messages + rvizMarker_.pose.position.x = markerPose.getOrigin().getX(); + rvizMarker_.pose.position.y = markerPose.getOrigin().getY(); + rvizMarker_.pose.position.z = markerPose.getOrigin().getZ(); + rvizMarker_.pose.orientation.x = markerPose.getRotation().getX(); + rvizMarker_.pose.orientation.y = markerPose.getRotation().getY(); + rvizMarker_.pose.orientation.z = markerPose.getRotation().getZ(); + rvizMarker_.pose.orientation.w = markerPose.getRotation().getW(); + + rvizMarker_.header.frame_id = image_msg.header.frame_id; + rvizMarker_.header.stamp = 
image_msg.header.stamp; + rvizMarker_.id = id; + + rvizMarker_.scale.x = 1.0 * marker_size/100.0; + rvizMarker_.scale.y = 1.0 * marker_size/100.0; + rvizMarker_.scale.z = 0.2 * marker_size/100.0; + rvizMarker_.ns = "basic_shapes"; + rvizMarker_.type = visualization_msgs::msg::Marker::CUBE; + rvizMarker_.action = visualization_msgs::msg::Marker::ADD; + switch (id) + { + case 0: + rvizMarker_.color.r = 0.0f; + rvizMarker_.color.g = 0.0f; + rvizMarker_.color.b = 1.0f; + rvizMarker_.color.a = 1.0; + break; + case 1: + rvizMarker_.color.r = 1.0f; + rvizMarker_.color.g = 0.0f; + rvizMarker_.color.b = 0.0f; + rvizMarker_.color.a = 1.0; + break; + case 2: + rvizMarker_.color.r = 0.0f; + rvizMarker_.color.g = 1.0f; + rvizMarker_.color.b = 0.0f; + rvizMarker_.color.a = 1.0; + break; + case 3: + rvizMarker_.color.r = 0.0f; + rvizMarker_.color.g = 0.5f; + rvizMarker_.color.b = 0.5f; + rvizMarker_.color.a = 1.0; + break; + case 4: + rvizMarker_.color.r = 0.5f; + rvizMarker_.color.g = 0.5f; + rvizMarker_.color.b = 0.0; + rvizMarker_.color.a = 1.0; + break; + default: + rvizMarker_.color.r = 0.5f; + rvizMarker_.color.g = 0.0f; + rvizMarker_.color.b = 0.5f; + rvizMarker_.color.a = 1.0; + break; + } + + rvizMarker_.lifetime = rclcpp::Duration (1.0); + rvizMarkerPub_->publish (rvizMarker_); + + //Get the pose of the tag in the camera frame, then the output frame (usually torso) + + + + // tf2::Transform tagPoseOutput = CamToOutput * markerPose; + //Create the pose marker messages + ar_track_alvar_msgs::msg::AlvarMarker ar_pose_marker; + tf2::doTransform(ar_pose_marker.pose, ar_pose_marker.pose,CamToOutput); + ar_pose_marker.header.frame_id = output_frame; + ar_pose_marker.header.stamp = image_msg.header.stamp; + ar_pose_marker.id = id; + arPoseMarkers_.markers.push_back (ar_pose_marker); + } + arPoseMarkers_.header.stamp = image_msg.header.stamp; + arMarkerPub_->publish (arPoseMarkers_); + } + catch (cv_bridge::Exception& e) + { + RCLCPP_ERROR(this->get_logger(),"Could not convert from '%s' to 'rgb8'.", image_msg.encoding.c_str ()); + } + prev_stamp_ = tf2_ros::fromMsg(image_msg.header.stamp); + } + } +}; +int main(int argc, char *argv[]) +{ + rclcpp::init(argc, argv); + rclcpp::spin(std::make_shared(argc,argv)); + rclcpp::shutdown(); return 0; + } diff --git a/ar_track_alvar/nodes/IndividualMarkersNoKinect.cpp b/ar_track_alvar/nodes/IndividualMarkersNoKinect.cpp index d5a439e..3258722 100644 --- a/ar_track_alvar/nodes/IndividualMarkersNoKinect.cpp +++ b/ar_track_alvar/nodes/IndividualMarkersNoKinect.cpp @@ -35,317 +35,331 @@ */ -#include +#include #include "ar_track_alvar/CvTestbed.h" #include "ar_track_alvar/MarkerDetector.h" #include "ar_track_alvar/Shared.h" #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include "tf2/convert.h" +#include "tf2_geometry_msgs/tf2_geometry_msgs.h" +#include "tf2/LinearMath/Transform.h" +#include "tf2_ros/buffer.h" +#include "tf2_ros/message_filter.h" +#include "tf2_ros/transform_broadcaster.h" +#include "tf2_ros/transform_listener.h" +#include "tf2_ros/create_timer_ros.h" +#include +#include using namespace alvar; using namespace std; -bool init=true; -Camera *cam; -cv_bridge::CvImagePtr cv_ptr_; -image_transport::Subscriber cam_sub_; -ros::Publisher arMarkerPub_; -ros::Publisher rvizMarkerPub_; -ar_track_alvar_msgs::AlvarMarkers arPoseMarkers_; -visualization_msgs::Marker rvizMarker_; -tf::TransformListener *tf_listener; -tf::TransformBroadcaster *tf_broadcaster; -MarkerDetector marker_detector; - -bool enableSwitched = 
false; -bool enabled = true; -double max_frequency; -double marker_size; -double max_new_marker_error; -double max_track_error; -std::string cam_image_topic; -std::string cam_info_topic; -std::string output_frame; -int marker_resolution = 5; // default marker resolution -int marker_margin = 2; // default marker margin - -void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg); - - -void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg) +class IndividualMarkersNoKinect : public rclcpp::Node { - //If we've already gotten the cam info, then go ahead - if(cam->getCamInfo_){ - try{ - tf::StampedTransform CamToOutput; - try{ - tf_listener->waitForTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, ros::Duration(1.0)); - tf_listener->lookupTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, CamToOutput); - } - catch (tf::TransformException ex){ - ROS_ERROR("%s",ex.what()); - } + + + private: + bool init=true; + Camera *cam; + cv_bridge::CvImagePtr cv_ptr_; + + ar_track_alvar_msgs::msg::AlvarMarkers arPoseMarkers_; + visualization_msgs::msg::Marker rvizMarker_; + + + rclcpp::Subscription::SharedPtr cam_sub_; + + + // image_transport::Subscriber cam_sub_; + + + rclcpp::Publisher::SharedPtr arMarkerPub_; + rclcpp::Publisher::SharedPtr rvizMarkerPub_; + rclcpp::Subscription::SharedPtr info_sub_; + + tf2::TimePoint prev_stamp_; + std::shared_ptr tf2_; + std::shared_ptr tf_listener_; + std::shared_ptr tf_broadcaster_; + + + MarkerDetector marker_detector; + + double parameters_set = false; + rclcpp::TimerBase::SharedPtr timer_; + double max_frequency=8.0; + double marker_size; + double max_new_marker_error; + double max_track_error; + std::string cam_image_topic; + std::string cam_info_topic; + std::string output_frame; + int marker_resolution = 5; // default marker resolution + int marker_margin = 2; // default marker margin + + + public: + IndividualMarkersNoKinect(int argc, char* argv[]) : Node("marker_detect") //, tf2_(this->get_clock()), tf_listener_(tf2_), tf_broadcaster_(this)//, it_(this) + { + rclcpp::Clock::SharedPtr clock = this->get_clock(); + tf2_ = std::make_shared(clock); + tf_listener_ = std::make_shared(*tf2_); + tf_broadcaster_ = std::make_shared(*this); + + // Get parameters. + this->declare_parameter("marker_size", 10.0); + this->declare_parameter("max_new_marker_error", 0.08); + this->declare_parameter("max_track_error", 0.2); + this->declare_parameter("max_frequency", 8.0); + this->declare_parameter("marker_resolution", 5); + this->declare_parameter("marker_margin", 2); + this->declare_parameter("output_frame", ""); - //Convert the image - cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); - - //Get the estimated pose of the main markers by using all the markers in each bundle - - // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives - // us a cv::Mat. 
I'm too lazy to change to cv::Mat throughout right now, so I - // do this conversion here -jbinney - IplImage ipl_image = cv_ptr_->image; - - marker_detector.Detect(&ipl_image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true); - arPoseMarkers_.markers.clear (); - for (size_t i=0; isize(); i++) - { - //Get the pose relative to the camera - int id = (*(marker_detector.markers))[i].GetId(); - Pose p = (*(marker_detector.markers))[i].pose; - double px = p.translation[0]/100.0; - double py = p.translation[1]/100.0; - double pz = p.translation[2]/100.0; - double qx = p.quaternion[1]; - double qy = p.quaternion[2]; - double qz = p.quaternion[3]; - double qw = p.quaternion[0]; - - tf::Quaternion rotation (qx,qy,qz,qw); - tf::Vector3 origin (px,py,pz); - tf::Transform t (rotation, origin); - tf::Vector3 markerOrigin (0, 0, 0); - tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin); - tf::Transform markerPose = t * m; // marker pose in the camera frame - - tf::Vector3 z_axis_cam = tf::Transform(rotation, tf::Vector3(0,0,0)) * tf::Vector3(0, 0, 1); -// ROS_INFO("%02i Z in cam frame: %f %f %f",id, z_axis_cam.x(), z_axis_cam.y(), z_axis_cam.z()); - /// as we can't see through markers, this one is false positive detection - if (z_axis_cam.z() > 0) - { - continue; - } - - //Publish the transform from the camera to the marker - std::string markerFrame = "ar_marker_"; - std::stringstream out; - out << id; - std::string id_string = out.str(); - markerFrame += id_string; - tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame.c_str()); - tf_broadcaster->sendTransform(camToMarker); - - //Create the rviz visualization messages - tf::poseTFToMsg (markerPose, rvizMarker_.pose); - rvizMarker_.header.frame_id = image_msg->header.frame_id; - rvizMarker_.header.stamp = image_msg->header.stamp; - rvizMarker_.id = id; - - rvizMarker_.scale.x = 1.0 * marker_size/100.0; - rvizMarker_.scale.y = 1.0 * marker_size/100.0; - rvizMarker_.scale.z = 0.2 * marker_size/100.0; - rvizMarker_.ns = "basic_shapes"; - rvizMarker_.type = visualization_msgs::Marker::CUBE; - rvizMarker_.action = visualization_msgs::Marker::ADD; - switch (id) - { - case 0: - rvizMarker_.color.r = 0.0f; - rvizMarker_.color.g = 0.0f; - rvizMarker_.color.b = 1.0f; - rvizMarker_.color.a = 1.0; - break; - case 1: - rvizMarker_.color.r = 1.0f; - rvizMarker_.color.g = 0.0f; - rvizMarker_.color.b = 0.0f; - rvizMarker_.color.a = 1.0; - break; - case 2: - rvizMarker_.color.r = 0.0f; - rvizMarker_.color.g = 1.0f; - rvizMarker_.color.b = 0.0f; - rvizMarker_.color.a = 1.0; - break; - case 3: - rvizMarker_.color.r = 0.0f; - rvizMarker_.color.g = 0.5f; - rvizMarker_.color.b = 0.5f; - rvizMarker_.color.a = 1.0; - break; - case 4: - rvizMarker_.color.r = 0.5f; - rvizMarker_.color.g = 0.5f; - rvizMarker_.color.b = 0.0; - rvizMarker_.color.a = 1.0; - break; - default: - rvizMarker_.color.r = 0.5f; - rvizMarker_.color.g = 0.0f; - rvizMarker_.color.b = 0.5f; - rvizMarker_.color.a = 1.0; - break; - } - rvizMarker_.lifetime = ros::Duration (1.0); - rvizMarkerPub_.publish (rvizMarker_); - - //Get the pose of the tag in the camera frame, then the output frame (usually torso) - tf::Transform tagPoseOutput = CamToOutput * markerPose; - - //Create the pose marker messages - ar_track_alvar_msgs::AlvarMarker ar_pose_marker; - tf::poseTFToMsg (tagPoseOutput, ar_pose_marker.pose.pose); - ar_pose_marker.header.frame_id = output_frame; - ar_pose_marker.header.stamp = image_msg->header.stamp; - ar_pose_marker.id 
= id; - arPoseMarkers_.markers.push_back (ar_pose_marker); - } - arMarkerPub_.publish (arPoseMarkers_); - } - catch (cv_bridge::Exception& e){ - ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ()); - } - } -} -void configCallback(ar_track_alvar::ParamsConfig &config, uint32_t level) -{ - ROS_INFO("AR tracker reconfigured: %s %.2f %.2f %.2f %.2f", config.enabled ? "ENABLED" : "DISABLED", - config.max_frequency, config.marker_size, config.max_new_marker_error, config.max_track_error); + // Camera input topics. Use remapping to map to your camera topics. + cam_image_topic = "camera_image"; + cam_info_topic = "camera_info"; - enableSwitched = enabled != config.enabled; + marker_detector.SetMarkerSize(marker_size, marker_resolution, marker_margin); + cam = new Camera(); + + prev_stamp_ = tf2::get_now(); - enabled = config.enabled; - max_frequency = config.max_frequency; - marker_size = config.marker_size; - max_new_marker_error = config.max_new_marker_error; - max_track_error = config.max_track_error; -} + //Give tf a chance to catch up before the camera callback starts asking for transforms + // It will also reconfigure parameters for the first time, setting the default values + //TODO: come back to this, there's probably a better way to do this + rclcpp::Rate loop_rate(100); + loop_rate.sleep(); -void enableCallback(const std_msgs::BoolConstPtr& msg) -{ - enableSwitched = enabled != msg->data; - enabled = msg->data; -} + arMarkerPub_ = this->create_publisher ("ar_pose_marker", 0); + rvizMarkerPub_ = this->create_publisher ("visualization_marker", 0); -int main(int argc, char *argv[]) -{ - ros::init (argc, argv, "marker_detect"); - ros::NodeHandle n, pn("~"); - - if(argc > 1) { - ROS_WARN("Command line arguments are deprecated. Consider using ROS parameters and remappings."); - - if(argc < 7){ - std::cout << std::endl; - cout << "Not enough arguments provided." << endl; - cout << "Usage: ./individualMarkersNoKinect " - << " [ ]"; - std::cout << std::endl; - return 0; - } + cam_sub_ = this->create_subscription(cam_image_topic, 1, + std::bind(&IndividualMarkersNoKinect::getCapCallback, this, std::placeholders::_1)); - // Get params from command line - marker_size = atof(argv[1]); - max_new_marker_error = atof(argv[2]); - max_track_error = atof(argv[3]); - cam_image_topic = argv[4]; - cam_info_topic = argv[5]; - output_frame = argv[6]; - - if (argc > 7) { - max_frequency = atof(argv[7]); - pn.setParam("max_frequency", max_frequency); - } - if (argc > 8) - marker_resolution = atoi(argv[8]); - if (argc > 9) - marker_margin = atoi(argv[9]); - - } else { - // Get params from ros param server. - pn.param("marker_size", marker_size, 10.0); - pn.param("max_new_marker_error", max_new_marker_error, 0.08); - pn.param("max_track_error", max_track_error, 0.2); - pn.param("max_frequency", max_frequency, 8.0); - pn.setParam("max_frequency", max_frequency); // in case it was not set. - pn.param("marker_resolution", marker_resolution, 5); - pn.param("marker_margin", marker_margin, 2); - if (!pn.getParam("output_frame", output_frame)) { - ROS_ERROR("Param 'output_frame' has to be set."); - exit(EXIT_FAILURE); - } - // Camera input topics. Use remapping to map to your camera topics. 
- cam_image_topic = "camera_image"; - cam_info_topic = "camera_info"; - } + RCLCPP_INFO(this->get_logger(),"Subscribing to info topic"); + info_sub_ = this->create_subscription(cam_info_topic, 1, std::bind(&IndividualMarkersNoKinect::InfoCallback, this, std::placeholders::_1)); - // Set dynamically configurable parameters so they don't get replaced by default values - pn.setParam("marker_size", marker_size); - pn.setParam("max_new_marker_error", max_new_marker_error); - pn.setParam("max_track_error", max_track_error); + timer_ = this->create_wall_timer(1000ms, std::bind(&IndividualMarkersNoKinect::set_parameters, this)); - marker_detector.SetMarkerSize(marker_size, marker_resolution, marker_margin); + } - cam = new Camera(n, cam_info_topic); - tf_listener = new tf::TransformListener(n); - tf_broadcaster = new tf::TransformBroadcaster(); - arMarkerPub_ = n.advertise < ar_track_alvar_msgs::AlvarMarkers > ("ar_pose_marker", 0); - rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0); - // Prepare dynamic reconfiguration - dynamic_reconfigure::Server < ar_track_alvar::ParamsConfig > server; - dynamic_reconfigure::Server::CallbackType f; + void InfoCallback (const sensor_msgs::msg::CameraInfo::SharedPtr cam_info) + { + if (!cam->getCamInfo_) + { + cam->SetCameraInfo(cam_info); + cam->getCamInfo_ = true; + info_sub_.reset(); + } + } - f = boost::bind(&configCallback, _1, _2); - server.setCallback(f); - //Give tf a chance to catch up before the camera callback starts asking for transforms - // It will also reconfigure parameters for the first time, setting the default values - ros::Duration(1.0).sleep(); - ros::spinOnce(); + void set_parameters() + { + if (output_frame.empty()) + { + RCLCPP_INFO(this->get_logger(), "Param 'output_frame' has to be set. 
output frame %s",output_frame.c_str()); + } + else + { + parameters_set=true; + } + + this->get_parameter("marker_size", marker_size); + this->get_parameter("max_new_marker_error", max_new_marker_error); + this->get_parameter("max_track_error", max_track_error); + this->get_parameter("max_frequency", max_frequency); + this->get_parameter("marker_resolution", marker_resolution); + this->get_parameter("marker_margin", marker_margin); + this->get_parameter("output_frame", output_frame); + } - image_transport::ImageTransport it_(n); - // Run at the configured rate, discarding pointcloud msgs if necessary - ros::Rate rate(max_frequency); + void getCapCallback (const sensor_msgs::msg::Image::SharedPtr image_msg) + //void getCapCallback(const sensor_msgs::msg::Image::ConstSharedPtr& image_msg) + { + std::string tf_error; + + //If we've already gotten the cam info, then go ahead + if(cam->getCamInfo_ && parameters_set){ + try + { + geometry_msgs::msg::TransformStamped CamToOutput; + try + { + tf2::TimePoint tf2_time = tf2_ros::fromMsg(image_msg->header.stamp); + CamToOutput = tf2_->lookupTransform(output_frame, image_msg->header.frame_id,tf2_time,tf2_time - prev_stamp_); + } + catch (tf2::TransformException ex){ + RCLCPP_ERROR(this->get_logger(), "%s",ex.what()); + tf_error.clear(); + } - /// Subscriber for enable-topic so that a user can turn off the detection if it is not used without - /// having to use the reconfigure where he has to know all parameters - ros::Subscriber enable_sub_ = pn.subscribe("enable_detection", 1, &enableCallback); + //Convert the image + cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); + + //Get the estimated pose of the main markers by using all the markers in each bundle + // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives + // us a cv::Mat. 
I'm too lazy to change to cv::Mat throughout right now, so I + // do this conversion here -jbinney + + cv::Mat ipl_image = cv_ptr_->image; + + marker_detector.Detect(cv_ptr_->image, cam, true, false, + max_new_marker_error, max_track_error, CVSEQ, + true); + arPoseMarkers_.markers.clear(); + + for (size_t i = 0; i < marker_detector.markers->size(); i++) + { + // Get the pose relative to the camera + int id = (*(marker_detector.markers))[i].GetId(); + Pose p = (*(marker_detector.markers))[i].pose; + double px = p.translation[0] / 100.0; + double py = p.translation[1] / 100.0; + double pz = p.translation[2] / 100.0; + + cv::Mat quat =cv::Mat(4, 1, CV_64F); + p.GetQuaternion(quat); + double qx = quat.at(1,0); //p.quaternion[1]; #leaving these for the record, this was a bug in the original repo + double qy = quat.at(2,0); //p.quaternion[2]; + double qz = quat.at(3,0); //p.quaternion[3]; + double qw = quat.at(0,0); //p.quaternion[0]; + tf2::Quaternion rotation (qx,qy,qz,qw); + tf2::Vector3 origin (px,py,pz); + tf2::Transform t (rotation, origin); + tf2::Vector3 markerOrigin (0, 0, 0); + tf2::Transform m (tf2::Quaternion::getIdentity (), markerOrigin); + tf2::Transform markerPose = t * m; // marker pose in the camera frame + + + tf2::Vector3 z_axis_cam = tf2::Transform(rotation, tf2::Vector3(0,0,0)) * tf2::Vector3(0, 0, 1); + + /// as we can't see through markers, this one is false positive detection + if (z_axis_cam.z() > 0) + { + continue; + } + + //Publish the transform from the camera to the marker + std::string markerFrame = "ar_marker_"; + std::stringstream out; + out << id; + std::string id_string = out.str(); + markerFrame += id_string; + + geometry_msgs::msg::TransformStamped camToMarker; + camToMarker.header.stamp = image_msg->header.stamp; + camToMarker.header.frame_id = image_msg->header.frame_id; + camToMarker.child_frame_id = markerFrame; + + geometry_msgs::msg::Vector3 trans; + trans.x = px; + trans.y = py; + trans.z = pz; + geometry_msgs::msg::Quaternion rot; + rot.x = qx; + rot.y = qy; + rot.z = qz; + rot.w = qw; + + camToMarker.transform.translation = trans; + camToMarker.transform.rotation = rot; + tf_broadcaster_->sendTransform(camToMarker); + + //Create the rviz visualization messages + rvizMarker_.pose.position.x = markerPose.getOrigin().getX(); + rvizMarker_.pose.position.y = markerPose.getOrigin().getY(); + rvizMarker_.pose.position.z = markerPose.getOrigin().getZ(); + rvizMarker_.pose.orientation.x = markerPose.getRotation().getX(); + rvizMarker_.pose.orientation.y = markerPose.getRotation().getY(); + rvizMarker_.pose.orientation.z = markerPose.getRotation().getZ(); + rvizMarker_.pose.orientation.w = markerPose.getRotation().getW(); + rvizMarker_.header.frame_id = image_msg->header.frame_id; + rvizMarker_.header.stamp = image_msg->header.stamp; + rvizMarker_.id = id; + + rvizMarker_.scale.x = 1.0 * marker_size/100.0; + rvizMarker_.scale.y = 1.0 * marker_size/100.0; + rvizMarker_.scale.z = 0.2 * marker_size/100.0; + rvizMarker_.ns = "basic_shapes"; + rvizMarker_.type = visualization_msgs::msg::Marker::CUBE; + rvizMarker_.action = visualization_msgs::msg::Marker::ADD; + switch (id) + { + case 0: + rvizMarker_.color.r = 0.0f; + rvizMarker_.color.g = 0.0f; + rvizMarker_.color.b = 1.0f; + rvizMarker_.color.a = 1.0; + break; + case 1: + rvizMarker_.color.r = 1.0f; + rvizMarker_.color.g = 0.0f; + rvizMarker_.color.b = 0.0f; + rvizMarker_.color.a = 1.0; + break; + case 2: + rvizMarker_.color.r = 0.0f; + rvizMarker_.color.g = 1.0f; + rvizMarker_.color.b = 0.0f; + 
rvizMarker_.color.a = 1.0;
+            break;
+          case 3:
+            rvizMarker_.color.r = 0.0f;
+            rvizMarker_.color.g = 0.5f;
+            rvizMarker_.color.b = 0.5f;
+            rvizMarker_.color.a = 1.0;
+            break;
+          case 4:
+            rvizMarker_.color.r = 0.5f;
+            rvizMarker_.color.g = 0.5f;
+            rvizMarker_.color.b = 0.0f;
+            rvizMarker_.color.a = 1.0;
+            break;
+          default:
+            rvizMarker_.color.r = 0.5f;
+            rvizMarker_.color.g = 0.0f;
+            rvizMarker_.color.b = 0.5f;
+            rvizMarker_.color.a = 1.0;
+            break;
+        }
+        // 1 second; note that rclcpp::Duration(1.0) would be interpreted as nanoseconds
+        rvizMarker_.lifetime = rclcpp::Duration(1, 0);
+        rvizMarkerPub_->publish(rvizMarker_);
+
+        //Create the pose marker messages
+        ar_track_alvar_msgs::msg::AlvarMarker ar_pose_marker;
+        //Get the pose of the tag in the camera frame, then the output frame (usually torso)
+        tf2::toMsg(markerPose, ar_pose_marker.pose.pose);
+        tf2::doTransform(ar_pose_marker.pose, ar_pose_marker.pose, CamToOutput);
+        ar_pose_marker.header.frame_id = output_frame;
+        ar_pose_marker.header.stamp = image_msg->header.stamp;
+        ar_pose_marker.id = id;
+        arPoseMarkers_.markers.push_back(ar_pose_marker);
+       }
+       arMarkerPub_->publish(arPoseMarkers_);
+      }
+      catch (cv_bridge::Exception& e)
+      {
+        RCLCPP_ERROR(this->get_logger(), "Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str());
+      }
+      prev_stamp_ = tf2_ros::fromMsg(image_msg->header.stamp);
+    }
+  }
-  enableSwitched = true;
-  while (ros::ok())
-  {
-    ros::spinOnce();
-    rate.sleep();
+};
-    if (std::abs((rate.expectedCycleTime() - ros::Duration(1.0 / max_frequency)).toSec()) > 0.001)
-    {
-      // Change rate dynamically; if must be above 0, as 0 will provoke a segfault on next spinOnce
-      ROS_DEBUG("Changing frequency from %.2f to %.2f", 1.0 / rate.expectedCycleTime().toSec(), max_frequency);
-      rate = ros::Rate(max_frequency);
-    }
+int main(int argc, char *argv[])
+{
-    if (enableSwitched)
-    {
-      // Enable/disable switch: subscribe/unsubscribe to make use of pointcloud processing nodelet
-      // lazy publishing policy; in CPU-scarce computer as TurtleBot's laptop this is a huge saving
-      if (enabled)
-        cam_sub_ = it_.subscribe(cam_image_topic, 1, &getCapCallback);
-      else
-        cam_sub_.shutdown();
-      enableSwitched = false;
-    }
-  }
+  rclcpp::init(argc, argv);
+  rclcpp::spin(std::make_shared<IndividualMarkersNoKinect>(argc, argv));
+  rclcpp::shutdown();
+  return 0;
-  return 0;
 }
diff --git a/ar_track_alvar/nodes/TrainMarkerBundle.cpp b/ar_track_alvar/nodes/TrainMarkerBundle.cpp
index ac15a41..7b3b1e8 100644
--- a/ar_track_alvar/nodes/TrainMarkerBundle.cpp
+++ b/ar_track_alvar/nodes/TrainMarkerBundle.cpp
@@ -41,12 +41,17 @@
 #include "ar_track_alvar/MultiMarkerInitializer.h"
 #include "ar_track_alvar/Shared.h"
 #include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
+#include "tf2_ros/buffer.h"
+#include "tf2_ros/transform_listener.h"
+#include
+#include
+#include
+#include "tf2_ros/create_timer_ros.h"
+#include
 #include
+#include

 using namespace alvar;
 using namespace std;
@@ -55,360 +60,438 @@ using namespace std;
 #define VISIBLE_MARKER 2
 #define GHOST_MARKER 3

-Camera *cam;
-cv_bridge::CvImagePtr cv_ptr_;
-image_transport::Subscriber cam_sub_;
-ros::Publisher arMarkerPub_;
-ros::Publisher rvizMarkerPub_;
-ar_track_alvar_msgs::AlvarMarkers arPoseMarkers_;
-tf::TransformListener *tf_listener;
-tf::TransformBroadcaster *tf_broadcaster;
-MarkerDetector marker_detector;
-MultiMarkerInitializer *multi_marker_init=NULL;
-MultiMarkerBundle *multi_marker_bundle=NULL;
-int auto_count;
-bool auto_collect;
-
-bool init=true;
-bool add_measurement=false;
-bool optimize = false;
-bool optimize_done = false;
-
-double marker_size;
-double max_new_marker_error;
-double max_track_error;
-std::string 
cam_image_topic; -std::string cam_info_topic; -std::string output_frame; -int nof_markers; - -double GetMultiMarkerPose(IplImage *image, Pose &pose); -void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg); -int keyCallback(int key); -void makeMarkerMsgs(int type, int id, Pose &p, sensor_msgs::ImageConstPtr image_msg, tf::StampedTransform &CamToOutput, visualization_msgs::Marker *rvizMarker, ar_track_alvar_msgs::AlvarMarker *ar_pose_marker); - - -double GetMultiMarkerPose(IplImage *image, Pose &pose) { - static bool init=true; - - if (init) { - init=false; - vector id_vector; - for(int i = 0; i < nof_markers; ++i) - id_vector.push_back(i); - // We make the initialization for MultiMarkerBundle using MultiMarkerInitializer - // Each marker needs to be visible in at least two images and at most 64 image are used. - multi_marker_init = new MultiMarkerInitializer(id_vector, 2, 64); - pose.Reset(); - multi_marker_init->PointCloudAdd(id_vector[0], marker_size, pose); - multi_marker_bundle = new MultiMarkerBundle(id_vector); - marker_detector.SetMarkerSize(marker_size); - } - - double error = -1; - if (!optimize_done) { - if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)) { - error = multi_marker_init->Update(marker_detector.markers, cam, pose); - } - } - else { - if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)) { - error = multi_marker_bundle->Update(marker_detector.markers, cam, pose); - if ((multi_marker_bundle->SetTrackMarkers(marker_detector, cam, pose, image) > 0) && (marker_detector.DetectAdditional(image, cam, false) > 0)) - { - error = multi_marker_bundle->Update(marker_detector.markers, cam, pose); - } - } - } - - if (add_measurement) { - cout << "Markers seen: " << marker_detector.markers->size() << "\n"; - if (marker_detector.markers->size() >= 2) { - cout<<"Adding measurement..."<MeasurementsAdd(marker_detector.markers); - } - else{ - cout << "Not enough markers to capture measurement\n"; - } - add_measurement=false; - } +class TrainMarkerBundle : public rclcpp::Node +{ + private: + Camera *cam; + cv_bridge::CvImagePtr cv_ptr_; + image_transport::Subscriber cam_sub_; + rclcpp::Publisher::SharedPtr arMarkerPub_; + rclcpp::Publisher::SharedPtr rvizMarkerPub_; + + + tf2::TimePoint prev_stamp_; + std::shared_ptr tf2_; + std::shared_ptr tf_listener_; + std::shared_ptr tf_broadcaster_; + + + MarkerDetector marker_detector; + MultiMarkerInitializer *multi_marker_init=NULL; + MultiMarkerBundle *multi_marker_bundle=NULL; + + ar_track_alvar_msgs::msg::AlvarMarkers arPoseMarkers_; + visualization_msgs::msg::Marker rvizMarker_; + rclcpp::Subscription::SharedPtr info_sub_; + + + int auto_count; + bool auto_collect; + bool init=true; + bool add_measurement=false; + bool optimize = false; + bool optimize_done = false; + + double marker_size; + double max_new_marker_error; + double max_track_error; + std::string cam_image_topic; + std::string cam_info_topic; + std::string output_frame; + int nof_markers; + + + public: + TrainMarkerBundle(int argc, char* argv[]):Node("marker_detect") + { + + rclcpp::Clock::SharedPtr clock = this->get_clock(); + tf2_ = std::make_shared(clock); + tf_listener_ = std::make_shared(*tf2_); + tf_broadcaster_ = std::make_shared(*this); + prev_stamp_ = tf2::get_now(); + + bool testing = false; + if(argc < 8){ + std::cout << std::endl; + cout << "Not enough arguments provided." 
<< endl; + cout << "Usage: ./trainMarkerBundle " << endl; + std::cout << std::endl; + exit(0); + } + else if(argc>8){ + testing=true; + } - if (optimize) { - cout<<"Initializing..."<Initialize(cam)) { - cout<<"Initialization failed, add some more measurements."<Reset(); - multi_marker_bundle->MeasurementsReset(); - // Copy all measurements into the bundle adjuster. - for (int i = 0; i < multi_marker_init->getMeasurementCount(); ++i) { - Pose p2; - multi_marker_init->getMeasurementPose(i, cam, p2); - const std::vector > markers = multi_marker_init->getMeasurementMarkers(i); - multi_marker_bundle->MeasurementsAdd(&markers, p2); - } - // Initialize the bundle adjuster with initial marker poses. - multi_marker_bundle->PointCloudCopy(multi_marker_init); - cout<<"Optimizing..."<Optimize(cam, 0.01, 20)) { - cout<<"Optimizing done"<header.stamp, image_msg->header.frame_id, markerFrame.c_str()); - tf_broadcaster->sendTransform(camToMarker); - } + if(!testing) + cv::namedWindow("Command input window", cv::WINDOW_AUTOSIZE); + //Give tf a chance to catch up before the camera callback starts asking for transforms + // It will also reconfigure parameters for the first time, setting the default values + //TODO: come back to this, there's probably a better way to do this + rclcpp::Rate loop_rate(100); + loop_rate.sleep(); - //Create the rviz visualization message - tf::poseTFToMsg (markerPose, rvizMarker->pose); - rvizMarker->header.frame_id = image_msg->header.frame_id; - rvizMarker->header.stamp = image_msg->header.stamp; - rvizMarker->id = id; - - rvizMarker->scale.x = 1.0 * marker_size/100.0; - rvizMarker->scale.y = 1.0 * marker_size/100.0; - rvizMarker->scale.z = 0.2 * marker_size/100.0; - rvizMarker->ns = "basic_shapes"; - rvizMarker->type = visualization_msgs::Marker::CUBE; - rvizMarker->action = visualization_msgs::Marker::ADD; - - //Determine a color and opacity, based on marker type - if(type==MAIN_MARKER){ - rvizMarker->color.r = 1.0f; - rvizMarker->color.g = 0.0f; - rvizMarker->color.b = 0.0f; - rvizMarker->color.a = 1.0; - } - else if(type==VISIBLE_MARKER){ - rvizMarker->color.r = 0.0f; - rvizMarker->color.g = 1.0f; - rvizMarker->color.b = 0.0f; - rvizMarker->color.a = 1.0; - } - else if(type==GHOST_MARKER){ - rvizMarker->color.r = 0.0f; - rvizMarker->color.g = 0.0f; - rvizMarker->color.b = 1.0f; - rvizMarker->color.a = 0.5; - } - rvizMarker->lifetime = ros::Duration (1.0); + arMarkerPub_ = this->create_publisher ("ar_pose_marker", 0); - //Get the pose of the tag in the camera frame, then convert to the output frame (usually torso) - tf::Transform tagPoseOutput = CamToOutput * markerPose; + rvizMarkerPub_ = this->create_publisher ("visualization_marker", 0); - //Create the pose marker message - tf::poseTFToMsg (tagPoseOutput, ar_pose_marker->pose.pose); - ar_pose_marker->header.frame_id = output_frame; - ar_pose_marker->header.stamp = image_msg->header.stamp; - ar_pose_marker->id = id; -} + //Subscribe to camera message + RCLCPP_INFO(this->get_logger(),"Subscribing to image topic"); + cam_sub_ = image_transport::create_subscription(this, cam_image_topic, [&](auto& msg) { this->getCapCallback(msg); },"raw"); -void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg) -{ - //Check if automatic measurement collection should be triggered - if(auto_collect){ - auto_count++; - add_measurement = true; - if(auto_count >= 5) - auto_collect = false; - } + RCLCPP_INFO(this->get_logger(),"Subscribing to info topic"); + info_sub_ = this->create_subscription(cam_info_topic, 1, 
std::bind(&TrainMarkerBundle::InfoCallback, this, std::placeholders::_1)); + + } + + void InfoCallback (const sensor_msgs::msg::CameraInfo::SharedPtr cam_info) + { + RCLCPP_INFO(this->get_logger(),"this executed"); + if (!cam->getCamInfo_) + { + cam->SetCameraInfo(cam_info); + cam->getCamInfo_ = true; + info_sub_.reset(); + } + } - //If we've already gotten the cam info, then go ahead - if(cam->getCamInfo_){ - try{ - //Get the transformation from the Camera to the output frame for this image capture - tf::StampedTransform CamToOutput; - try{ - tf_listener->waitForTransform(image_msg->header.frame_id, output_frame, image_msg->header.stamp, ros::Duration(1.0)); - tf_listener->lookupTransform(image_msg->header.frame_id, output_frame, image_msg->header.stamp, CamToOutput); - } - catch (tf::TransformException ex){ - ROS_ERROR("%s",ex.what()); - } - - visualization_msgs::Marker rvizMarker; - ar_track_alvar_msgs::AlvarMarker ar_pose_marker; - arPoseMarkers_.markers.clear (); - - //Convert the image - cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); - - //Get the estimated pose of the main markers by using all the markers in each bundle - - // GetMultiMarkersPoses expects an IplImage*, but as of ros groovy, cv_bridge gives - // us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I - // do this conversion here -jbinney - IplImage ipl_image = cv_ptr_->image; - - //Get the estimated pose of the main marker using the whole bundle - static Pose bundlePose; - double error = GetMultiMarkerPose(&ipl_image, bundlePose); - - if (optimize_done){ - //Draw the main marker - makeMarkerMsgs(MAIN_MARKER, 0, bundlePose, image_msg, CamToOutput, &rvizMarker, &ar_pose_marker); - rvizMarkerPub_.publish (rvizMarker); - arPoseMarkers_.markers.push_back (ar_pose_marker); + void getCapCallback (const sensor_msgs::msg::Image::ConstSharedPtr& image_msg) + { + //Check if automatic measurement collection should be triggered + if(auto_collect){ + auto_count++; + add_measurement = true; + if(auto_count >= 5) + auto_collect = false; } - //Now grab the poses of the other markers that are visible - for (size_t i=0; isize(); i++) + + //If we've already gotten the cam info, then go ahead + if(cam->getCamInfo_){ + try{ + //Get the transformation from the Camera to the output frame for this image capture + geometry_msgs::msg::TransformStamped CamToOutput; + try{ + tf2::TimePoint tf2_time = tf2_ros::fromMsg(image_msg->header.stamp); + CamToOutput = tf2_->lookupTransform(output_frame, image_msg->header.frame_id,tf2_time,tf2_time - prev_stamp_); + } + catch (tf2::TransformException ex){ + RCLCPP_ERROR(rclcpp::get_logger("ArTrackAlvar"), "%s",ex.what()); + } + + visualization_msgs::msg::Marker rvizMarker; + ar_track_alvar_msgs::msg::AlvarMarker ar_pose_marker; + arPoseMarkers_.markers.clear (); + + //Convert the image + cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8); + + //Get the estimated pose of the main markers by using all the markers in each bundle + + // GetMultiMarkersPoses expects an cv::Mat*, but as of ros groovy, cv_bridge gives + // us a cv::Mat. 
I'm too lazy to change to cv::Mat throughout right now, so I + // do this conversion here -jbinney + cv::Mat ipl_image = cv_ptr_->image; + + //Get the estimated pose of the main marker using the whole bundle + static Pose bundlePose; + double error = GetMultiMarkerPose(&ipl_image, bundlePose); + + if (optimize_done){ + //Draw the main marker + makeMarkerMsgs(MAIN_MARKER, 0, bundlePose, image_msg, CamToOutput, &rvizMarker, &ar_pose_marker); + rvizMarkerPub_->publish (rvizMarker); + arPoseMarkers_.markers.push_back (ar_pose_marker); + } + //Now grab the poses of the other markers that are visible + for (size_t i=0; isize(); i++) + { + int id = (*(marker_detector.markers))[i].GetId(); + + //Don't do the main marker (id=0) if we've already drawn it + if(id > 0 || ((!optimize_done) && id==0)){ + Pose p = (*(marker_detector.markers))[i].pose; + makeMarkerMsgs(VISIBLE_MARKER, id, p, image_msg, CamToOutput, &rvizMarker, &ar_pose_marker); + rvizMarkerPub_->publish (rvizMarker); + arPoseMarkers_.markers.push_back (ar_pose_marker); + } + } + arMarkerPub_->publish (arPoseMarkers_); + } + catch (cv_bridge::Exception& e){ + RCLCPP_ERROR (this->get_logger(),"Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ()); + } + prev_stamp_ = tf2_ros::fromMsg(image_msg->header.stamp); + } + + //Sleep if we are auto collecting + if(auto_collect) + usleep(1000000); + } + + double GetMultiMarkerPose(cv::Mat *image, Pose &pose) + { + static bool init=true; + + if (init) { + init=false; + vector id_vector; + for(int i = 0; i < nof_markers; ++i) + id_vector.push_back(i); + // We make the initialization for MultiMarkerBundle using MultiMarkerInitializer + // Each marker needs to be visible in at least two images and at most 64 image are used. + multi_marker_init = new MultiMarkerInitializer(id_vector, 2, 64); + pose.Reset(); + multi_marker_init->PointCloudAdd(id_vector[0], marker_size, pose); + multi_marker_bundle = new MultiMarkerBundle(id_vector); + marker_detector.SetMarkerSize(marker_size); + } + + double error = -1; + if (!optimize_done) { + if (marker_detector.Detect(*image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)) { + error = multi_marker_init->Update(marker_detector.markers, cam, pose); + } + } + else { + if (marker_detector.Detect(*image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)) { + error = multi_marker_bundle->Update(marker_detector.markers, cam, pose); + if ((multi_marker_bundle->SetTrackMarkers(marker_detector, cam, pose, *image) > 0) && (marker_detector.DetectAdditional(*image, cam, false) > 0)) + { + error = multi_marker_bundle->Update(marker_detector.markers, cam, pose); + } + } + } + + if (add_measurement) { + cout << "Markers seen: " << marker_detector.markers->size() << "\n"; + if (marker_detector.markers->size() >= 2) { + cout<<"Adding measurement..."<MeasurementsAdd(marker_detector.markers); + } + else{ + cout << "Not enough markers to capture measurement\n"; + } + add_measurement=false; + } + + if (optimize) { + cout<<"Initializing..."<Initialize(cam)) { + cout<<"Initialization failed, add some more measurements."<Reset(); + multi_marker_bundle->MeasurementsReset(); + // Copy all measurements into the bundle adjuster. 
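+          // This handoff is the core of bundle training: each stored measurement
+          // is replayed into the bundle adjuster together with the camera pose
+          // estimated for that frame, after which PointCloudCopy() seeds the
+          // adjuster with the initializer's marker poses so that Optimize()
+          // below can jointly refine all of them.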
+ for (int i = 0; i < multi_marker_init->getMeasurementCount(); ++i) { + Pose p2; + multi_marker_init->getMeasurementPose(i, cam, p2); + const std::vector > markers = multi_marker_init->getMeasurementMarkers(i); + multi_marker_bundle->MeasurementsAdd(&markers, p2); + } + // Initialize the bundle adjuster with initial marker poses. + multi_marker_bundle->PointCloudCopy(multi_marker_init); + cout<<"Optimizing..."<Optimize(cam, 0.01, 20)) { + cout<<"Optimizing done"<header.stamp; + camToMarker.header.frame_id = image_msg->header.frame_id; + camToMarker.child_frame_id = markerFrame; + + geometry_msgs::msg::Vector3 trans; + trans.x = px; + trans.y = py; + trans.z = pz; + geometry_msgs::msg::Quaternion rot; + rot.x = qx; + rot.y = qy; + rot.z = qz; + rot.w = qw; + + camToMarker.transform.translation = trans; + camToMarker.transform.rotation = rot; + tf_broadcaster_->sendTransform(camToMarker); + } + + //Create the rviz visualization message + rvizMarker_.pose.position.x = markerPose.getOrigin().getX(); + rvizMarker_.pose.position.y = markerPose.getOrigin().getY(); + rvizMarker_.pose.position.z = markerPose.getOrigin().getZ(); + rvizMarker_.pose.orientation.x = markerPose.getRotation().getX(); + rvizMarker_.pose.orientation.y = markerPose.getRotation().getY(); + rvizMarker_.pose.orientation.z = markerPose.getRotation().getZ(); + rvizMarker_.pose.orientation.w = markerPose.getRotation().getW(); + rvizMarker->header.frame_id = image_msg->header.frame_id; + rvizMarker->header.stamp = image_msg->header.stamp; + rvizMarker->id = id; + + rvizMarker->scale.x = 1.0 * marker_size/100.0; + rvizMarker->scale.y = 1.0 * marker_size/100.0; + rvizMarker->scale.z = 0.2 * marker_size/100.0; + rvizMarker->ns = "basic_shapes"; + rvizMarker->type = visualization_msgs::msg::Marker::CUBE; + rvizMarker->action = visualization_msgs::msg::Marker::ADD; + + //Determine a color and opacity, based on marker type + if(type==MAIN_MARKER){ + rvizMarker->color.r = 1.0f; + rvizMarker->color.g = 0.0f; + rvizMarker->color.b = 0.0f; + rvizMarker->color.a = 1.0; + } + else if(type==VISIBLE_MARKER){ + rvizMarker->color.r = 0.0f; + rvizMarker->color.g = 1.0f; + rvizMarker->color.b = 0.0f; + rvizMarker->color.a = 1.0; + } + else if(type==GHOST_MARKER){ + rvizMarker->color.r = 0.0f; + rvizMarker->color.g = 0.0f; + rvizMarker->color.b = 1.0f; + rvizMarker->color.a = 0.5; + } + + rvizMarker->lifetime = rclcpp::Duration (1.0); + + + //Create the pose marker messages + //ar_track_alvar_msgs::msg::AlvarMarker ar_pose_marker; + //Get the pose of the tag in the camera frame, then the output frame (usually torso) + tf2::doTransform(ar_pose_marker->pose, ar_pose_marker->pose,CamToOutput); + + + //Get the pose of the tag in the camera frame, then convert to the output frame (usually torso) + //tf2::Transform tagPoseOutput = CamToOutput * markerPose; + + //Create the pose marker message + // tf::poseTFToMsg (tagPoseOutput, ar_pose_marker->pose.pose); + ar_pose_marker->header.frame_id = output_frame; + ar_pose_marker->header.stamp = image_msg->header.stamp; + ar_pose_marker->id = id; + } + + //Do something based on keystrokes from menu + int keyProcess(int key) + { + if(key == 'r') + { + cout<<"Reseting multi marker"<Reset(); + multi_marker_init->MeasurementsReset(); + multi_marker_bundle->Reset(); + multi_marker_bundle->MeasurementsReset(); + add_measurement = false; + optimize = false; + optimize_done = false; + } + else if(key == 'l') + { + if(multi_marker_bundle->Load("mmarker.xml", FILE_FORMAT_XML)) + { + cout<<"Multi marker 
loaded"<PointCloudCopy(multi_marker_bundle); + optimize_done = true; + } + else + cout<<"Cannot load multi marker"<Save("mmarker.xml", FILE_FORMAT_XML)) + cout<<"Multi marker saved"< 0 || ((!optimize_done) && id==0)){ - Pose p = (*(marker_detector.markers))[i].pose; - makeMarkerMsgs(VISIBLE_MARKER, id, p, image_msg, CamToOutput, &rvizMarker, &ar_pose_marker); - rvizMarkerPub_.publish (rvizMarker); - arPoseMarkers_.markers.push_back (ar_pose_marker); - } + add_measurement=true; } - arMarkerPub_.publish (arPoseMarkers_); + else if(key == 'a') + { + auto_count = 0; + auto_collect = true; + } + else if(key == 'o') + { + optimize=true; + } + else if(key == 'q') + { + exit(0); + } + else return key; + + return 0; } - catch (cv_bridge::Exception& e){ - ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ()); - } - } - //Sleep if we are auto collecting - if(auto_collect) - usleep(1000000); -} +}; -//Do something based on keystrokes from menu -int keyProcess(int key) -{ - if(key == 'r') - { - cout<<"Reseting multi marker"<Reset(); - multi_marker_init->MeasurementsReset(); - multi_marker_bundle->Reset(); - multi_marker_bundle->MeasurementsReset(); - add_measurement = false; - optimize = false; - optimize_done = false; - } - else if(key == 'l') - { - if(multi_marker_bundle->Load("mmarker.xml", FILE_FORMAT_XML)) - { - cout<<"Multi marker loaded"<PointCloudCopy(multi_marker_bundle); - optimize_done = true; - } - else - cout<<"Cannot load multi marker"<Save("mmarker.xml", FILE_FORMAT_XML)) - cout<<"Multi marker saved"< " << endl; - std::cout << std::endl; - return 0; - } + rclcpp::init(argc, argv); - // Get params from command line - nof_markers = atoi(argv[1]); - marker_size = atof(argv[2]); - max_new_marker_error = atof(argv[3]); - max_track_error = atof(argv[4]); - cam_image_topic = argv[5]; - cam_info_topic = argv[6]; - output_frame = argv[7]; - marker_detector.SetMarkerSize(marker_size); - - cam = new Camera(n, cam_info_topic); - tf_listener = new tf::TransformListener(n); - tf_broadcaster = new tf::TransformBroadcaster(); - arMarkerPub_ = n.advertise < ar_track_alvar_msgs::AlvarMarkers > ("ar_pose_marker", 0); - rvizMarkerPub_ = n.advertise < visualization_msgs::Marker > ("visualization_marker", 0); - - //Give tf a chance to catch up before the camera callback starts asking for transforms - ros::Duration(1.0).sleep(); - ros::spinOnce(); - - //Subscribe to camera message - ROS_INFO ("Subscribing to image topic"); - image_transport::ImageTransport it_(n); - cam_sub_ = it_.subscribe (cam_image_topic, 1, &getCapCallback); + auto node = std::make_shared(argc,argv); - // Output usage message + // Output usage message std::cout << std::endl; std::cout << "Keyboard Shortcuts:" << std::endl; std::cout << " l: load marker configuration from mmarker.txt" << std::endl; @@ -422,14 +505,16 @@ int main(int argc, char *argv[]) std::cout << "Please type commands with the openCV window selected" << std::endl; std::cout << std::endl; - cvNamedWindow("Command input window", CV_WINDOW_AUTOSIZE); + while(1){ - int key = cvWaitKey(20); + int key = cv::waitKey(20); if(key >= 0) - keyProcess(key); - ros::spinOnce(); + node->keyProcess(key); + rclcpp::spin_some(node); } - return 0; + rclcpp::shutdown(); + return 0; + } diff --git a/ar_track_alvar/package.xml b/ar_track_alvar/package.xml index e5184c0..306f8ee 100644 --- a/ar_track_alvar/package.xml +++ b/ar_track_alvar/package.xml @@ -1,4 +1,4 @@ - + ar_track_alvar 0.7.1 @@ -10,48 +10,42 @@ This package is a ROS wrapper for Alvar, an open source 
AR tag tracking library. LGPL-2.1 http://ros.org/wiki/ar_track_alvar - catkin + ament_cmake +ar_track_alvar_msgs +cv_bridge +geometry_msgs +image_transport +pcl_conversions +perception_pcl +rclcpp +rclpy +resource_retriever +sensor_msgs +std_msgs +tf2 +rosbag2_bag_v2_plugins +tf2_ros +tf2_geometry_msgs +tinyxml2 +visualization_msgs +libopencv-dev +eigen +libpcl-all-dev - ar_track_alvar_msgs - cmake_modules - cv_bridge - geometry_msgs - image_transport - message_generation - pcl_ros - pcl_conversions - resource_retriever - rospy - roscpp - sensor_msgs - std_msgs - tf - tf2 - tinyxml - visualization_msgs - dynamic_reconfigure - ar_track_alvar_msgs - cv_bridge - geometry_msgs - image_transport - message_runtime - pcl_ros - pcl_conversions - resource_retriever - rospy - roscpp - sensor_msgs - std_msgs - tf - tf2 - tinyxml - visualization_msgs - dynamic_reconfigure - - rosbag - rostest + ament_cmake_pytest + ament_lint_auto + launch + launch_ros + launch_testing + launch_testing_ament_cmake + python3-opencv + python_cmake_module + rclpy + ros_testing + rosbag2 + +ament_cmake + - - diff --git a/ar_track_alvar/src/Alvar.cpp b/ar_track_alvar/src/Alvar.cpp index fbf2433..5d30cbf 100644 --- a/ar_track_alvar/src/Alvar.cpp +++ b/ar_track_alvar/src/Alvar.cpp @@ -25,21 +25,27 @@ #include -namespace alvar { - +namespace alvar +{ void alvarInfo() { - std::cerr << "ALVAR " << ALVAR_VERSION << " - A Library for Virtual and Augmented Reality" << std::endl; - std::cerr << "Copyright 2007-2012 VTT Technical Research Centre of Finland" << std::endl; - std::cerr << "Licensed under the GNU Lesser General Public License" << std::endl; - std::cerr << "Built on " << ALVAR_DATE << " for " << ALVAR_SYSTEM << std::endl; - std::cerr << std::endl; + std::cerr << "ALVAR " << ALVAR_VERSION + << " - A Library for Virtual and Augmented Reality" << std::endl; + std::cerr << "Copyright 2007-2012 VTT Technical Research Centre of Finland" + << std::endl; + std::cerr << "Licensed under the GNU Lesser General Public License" + << std::endl; + std::cerr << "Built on " << ALVAR_DATE << " for " << ALVAR_SYSTEM + << std::endl; + std::cerr << std::endl; } -struct AlvarLoader { - AlvarLoader() { - alvarInfo(); - } +struct AlvarLoader +{ + AlvarLoader() + { + alvarInfo(); + } } alvarBasicLoader; -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Bitset.cpp b/ar_track_alvar/src/Bitset.cpp index be6c55c..da5a615 100644 --- a/ar_track_alvar/src/Bitset.cpp +++ b/ar_track_alvar/src/Bitset.cpp @@ -25,306 +25,408 @@ using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -int Bitset::Length() { - return bits.size(); +int Bitset::Length() +{ + return bits.size(); +} +ostream& Bitset::Output(ostream& os) const +{ + deque::const_iterator iter = bits.begin(); + while (iter != bits.end()) + { + if (*iter) + os << "1"; + else + os << "0"; + iter++; + } + return os; +} +void Bitset::clear() +{ + bits.clear(); } -ostream &Bitset::Output(ostream &os) const { - deque::const_iterator iter=bits.begin(); - while (iter != bits.end()) { - if (*iter) os<<"1"; - else os<<"0"; - iter++; - } - return os; +void Bitset::push_back(const bool bit) +{ + bits.push_back(bit); } -void Bitset::clear() { bits.clear(); } -void Bitset::push_back(const bool bit) { bits.push_back(bit); } -void Bitset::push_back(const unsigned char b, int bit_count /*=8*/) { - push_back((const unsigned long)b, bit_count); +void Bitset::push_back(const unsigned char b, int bit_count /*=8*/) +{ + push_back((const unsigned long)b, bit_count); } 
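+// Usage sketch (illustrative only, not from the original source): the
+// fixed-width overloads here all funnel into push_back(unsigned long,
+// bit_count), which appends bits most-significant-first, e.g.
+//
+//   Bitset bs;
+//   bs.push_back(0xA5CUL, 12);  // appends the bits 1010 0101 1100
+//   std::string s = bs.hex();   // yields "a5c"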
-void Bitset::push_back(const unsigned short s, int bit_count /*=16*/) { - push_back((const unsigned long)s, bit_count); +void Bitset::push_back(const unsigned short s, int bit_count /*=16*/) +{ + push_back((const unsigned long)s, bit_count); } -void Bitset::push_back(const unsigned long l, int bit_count /*=32*/) { - unsigned long mask; - if ((bit_count > 32) || (bit_count == 0)) bit_count=32; - mask = 0x01<<(bit_count-1); - for (int i=0; i>=1; - } +void Bitset::push_back(const unsigned long l, int bit_count /*=32*/) +{ + unsigned long mask; + if ((bit_count > 32) || (bit_count == 0)) + bit_count = 32; + mask = 0x01 << (bit_count - 1); + for (int i = 0; i < bit_count; i++) + { + if (l & mask) + push_back(true); + else + push_back(false); + mask >>= 1; + } } -void Bitset::push_back_meaningful(const unsigned long l) { - int bit_count = 1; - for (int i=0; i<32; i++) { - unsigned long mask = 0x01< 0x08) bitpos >>= 4; - for (size_t i=0; i < bits.size(); i++) { - if (bits[i]) b = b | bitpos; - else b = b & (0x0f ^ bitpos); - bitpos >>= 1; - if (bitpos == 0x00) { - bitpos = 0x08; - ss << b; - } - } - return ss.str(); + stringstream ss; + ss.unsetf(std::ios_base::dec); + ss.setf(std::ios_base::hex); + unsigned long b = 0; + int bitpos = (0x08 << (bits.size() % 4)); + if (bitpos > 0x08) + bitpos >>= 4; + for (size_t i = 0; i < bits.size(); i++) + { + if (bits[i]) + b = b | bitpos; + else + b = b & (0x0f ^ bitpos); + bitpos >>= 1; + if (bitpos == 0x00) + { + bitpos = 0x08; + ss << b; + } + } + return ss.str(); } unsigned long Bitset::ulong() { - //if(bits.size() > (sizeof(unsigned long)*8)) - // throw "code too big for unsigned long\n"; - stringstream ss; - ss << setbase(16) << hex(); - unsigned long v; - ss >> v; - return v; + // if(bits.size() > (sizeof(unsigned long)*8)) + // throw "code too big for unsigned long\n"; + stringstream ss; + ss << setbase(16) << hex(); + unsigned long v; + ss >> v; + return v; } unsigned char Bitset::uchar() { - //if(bits.size() > (sizeof(unsigned char)*8)) - // throw "code too big for unsigned char\n"; - stringstream ss; - ss << setbase(16) << hex(); - unsigned long v; //ttehop: Note, that this cannot be char - ss >> v; - return (unsigned char)v; + // if(bits.size() > (sizeof(unsigned char)*8)) + // throw "code too big for unsigned char\n"; + stringstream ss; + ss << setbase(16) << hex(); + unsigned long v; // ttehop: Note, that this cannot be char + ss >> v; + return (unsigned char)v; } -void BitsetExt::hamming_enc_block(unsigned long block_len, deque::iterator &iter) { - if (verbose) cout<<"hamming_enc_block: "; - unsigned long next_parity=1; - for (unsigned long i=1; i<=block_len; i++) { - // Add a parity bit if this a place for such - if (i == next_parity) { - if (verbose) cout<<"p"; - next_parity <<= 1; - iter = bits.insert(iter, false); - } - // Otherwise if this bit is 1 change all related parity bits - else { - if (iter == bits.end()) { - block_len = i-1; - break; - } - if (verbose) cout<<(*iter?1:0); - if (*iter) { - unsigned long parity = next_parity>>1; - while (parity) { - if (i & parity) { - deque::iterator parity_iter=(iter - (i - parity)); - *parity_iter = !*parity_iter; - } - parity >>= 1; - } - } - } - iter++; - } - // Update the last parity bit if we have one - // Note, that the last parity bit can safely be removed from the code if it is not desired... 
- if (block_len == (next_parity >> 1)) { - // If the last bit is parity bit - make parity over the previous data - for (unsigned long ii=1; ii "; - for (unsigned long ii=block_len; ii>=1; ii--) { - cout<<(*(iter-ii)?1:0); - } - cout<<" block_len: "<::iterator& iter) +{ + if (verbose) + cout << "hamming_enc_block: "; + unsigned long next_parity = 1; + for (unsigned long i = 1; i <= block_len; i++) + { + // Add a parity bit if this a place for such + if (i == next_parity) + { + if (verbose) + cout << "p"; + next_parity <<= 1; + iter = bits.insert(iter, false); + } + // Otherwise if this bit is 1 change all related parity bits + else + { + if (iter == bits.end()) + { + block_len = i - 1; + break; + } + if (verbose) + cout << (*iter ? 1 : 0); + if (*iter) + { + unsigned long parity = next_parity >> 1; + while (parity) + { + if (i & parity) + { + deque::iterator parity_iter = (iter - (i - parity)); + *parity_iter = !*parity_iter; + } + parity >>= 1; + } + } + } + iter++; + } + // Update the last parity bit if we have one + // Note, that the last parity bit can safely be removed from the code if it is + // not desired... + if (block_len == (next_parity >> 1)) + { + // If the last bit is parity bit - make parity over the previous data + for (unsigned long ii = 1; ii < block_len; ii++) + { + if (*(iter - ii - 1)) + *(iter - 1) = !*(iter - 1); + } + } + if (verbose) + { + cout << " -> "; + for (unsigned long ii = block_len; ii >= 1; ii--) + { + cout << (*(iter - ii) ? 1 : 0); + } + cout << " block_len: " << block_len << endl; + } } -int BitsetExt::hamming_dec_block(unsigned long block_len, deque::iterator &iter) { - if (verbose) cout<<"hamming_dec_block: "; - bool potentially_double_error = false; - unsigned long total_parity=0; - unsigned long parity=0; - unsigned long next_parity=1; - for (unsigned long i=1; i<=block_len; i++) { - if (iter == bits.end()) { - // ttehop: - // At 3.12.2009 I changed the following line because - // it crashed with 7x7 markers. However, I didn't fully - // understand the reason why it should be so. Lets - // give more thought to it when we have more time. - // old version: block_len = i-1; - block_len = i; - break; - } - if (*iter) { - parity = parity ^ i; - total_parity = total_parity ^ 1; - } - if (i == next_parity) { - if (verbose) cout<<"("<<*iter<<")"; - next_parity <<= 1; - iter = bits.erase(iter); - } else { - if (verbose) cout<<*iter; - iter++; - } - } - if (block_len < 3) { - if (verbose) cout<<" too short"<> 1)) { - parity = parity & ~(next_parity >> 1); // The last parity bit shouldn't be included in the other parity tests (TODO: Better solution) - if (total_parity == 0) { - potentially_double_error = true; - } - } - int steps=0; - if (verbose) cout<<" parity: "<= parity) { - steps++; - } - } - iter[-steps] = !iter[-steps]; - if (verbose) cout<<" corrected"<::iterator& iter) +{ + if (verbose) + cout << "hamming_dec_block: "; + bool potentially_double_error = false; + unsigned long total_parity = 0; + unsigned long parity = 0; + unsigned long next_parity = 1; + for (unsigned long i = 1; i <= block_len; i++) + { + if (iter == bits.end()) + { + // ttehop: + // At 3.12.2009 I changed the following line because + // it crashed with 7x7 markers. However, I didn't fully + // understand the reason why it should be so. Lets + // give more thought to it when we have more time. 
+ // old version: block_len = i-1; + block_len = i; + break; + } + if (*iter) + { + parity = parity ^ i; + total_parity = total_parity ^ 1; + } + if (i == next_parity) + { + if (verbose) + cout << "(" << *iter << ")"; + next_parity <<= 1; + iter = bits.erase(iter); + } + else + { + if (verbose) + cout << *iter; + iter++; + } + } + if (block_len < 3) + { + if (verbose) + cout << " too short" << endl; + return 0; + } + if (block_len == (next_parity >> 1)) + { + parity = parity & ~(next_parity >> 1); // The last parity bit shouldn't be + // included in the other parity + // tests (TODO: Better solution) + if (total_parity == 0) + { + potentially_double_error = true; + } + } + int steps = 0; + if (verbose) + cout << " parity: " << parity; + if (parity) + { + if (potentially_double_error) + { + if (verbose) + cout << " double error" << endl; + return -1; + } + next_parity = 1; + for (unsigned long i = 1; i <= block_len; i++) + { + if (i == next_parity) + { + next_parity <<= 1; + if (i == parity) + { + if (verbose) + cout << " parity bit error" << endl; + return 1; // Only parity bit was erroneous + } + } + else if (i >= parity) + { + steps++; + } + } + iter[-steps] = !iter[-steps]; + if (verbose) + cout << " corrected" << endl; + return 1; + } + if (verbose) + cout << " ok" << endl; + return 0; } -BitsetExt::BitsetExt() { - SetVerbose(false); +BitsetExt::BitsetExt() +{ + SetVerbose(false); } -BitsetExt::BitsetExt(bool _verbose) { - SetVerbose(_verbose); +BitsetExt::BitsetExt(bool _verbose) +{ + SetVerbose(_verbose); } /* void BitsetExt::str_8to6bit() { - // TODO: Assume that the bitset contains 8-bit ASCII chars and change them into 6-bit chars + // TODO: Assume that the bitset contains 8-bit ASCII chars and change them +into 6-bit chars } void BitsetExt::str_6to8bit() { - // TODO: Assume that the bitset contains 6-bit chars and change them into 8-bit ASCII chars + // TODO: Assume that the bitset contains 6-bit chars and change them into +8-bit ASCII chars } void BitsetExt::crc_enc(int crc_len) { - // TODO: Add crc_len-bit checksum for the bitset + // TODO: Add crc_len-bit checksum for the bitset } bool BitsetExt::crc_dec(int crc_len) { - // TODO: Check the CRC - return false; + // TODO: Check the CRC + return false; } */ -void BitsetExt::SetVerbose(bool _verbose) { - verbose = _verbose; +void BitsetExt::SetVerbose(bool _verbose) +{ + verbose = _verbose; } -int BitsetExt::count_hamming_enc_len(int block_len, int dec_len) { - int parity_len=0; - int dec_len_count = dec_len; - while (dec_len_count > 0) { - unsigned long next_parity = 1; - for (unsigned long i=1; i<=(unsigned long)block_len; i++) { - if (i == next_parity) { - parity_len++; - next_parity <<= 1; - } else { - dec_len_count--; - } - if (dec_len_count == 0) break; - } - } - return dec_len + parity_len; +int BitsetExt::count_hamming_enc_len(int block_len, int dec_len) +{ + int parity_len = 0; + int dec_len_count = dec_len; + while (dec_len_count > 0) + { + unsigned long next_parity = 1; + for (unsigned long i = 1; i <= (unsigned long)block_len; i++) + { + if (i == next_parity) + { + parity_len++; + next_parity <<= 1; + } + else + { + dec_len_count--; + } + if (dec_len_count == 0) + break; + } + } + return dec_len + parity_len; } -int BitsetExt::count_hamming_dec_len(int block_len, int enc_len) { - int parity_len=0; - int enc_len_count = enc_len; - while (enc_len_count > 0) { - unsigned long next_parity = 1; - unsigned long i; - for (i=1; i<=(unsigned long)block_len; i++) { - if (i == next_parity) { - parity_len++; - next_parity <<= 1; - 
} - enc_len_count--; - if (enc_len_count == 0) break; - } - } - return enc_len - parity_len; +int BitsetExt::count_hamming_dec_len(int block_len, int enc_len) +{ + int parity_len = 0; + int enc_len_count = enc_len; + while (enc_len_count > 0) + { + unsigned long next_parity = 1; + unsigned long i; + for (i = 1; i <= (unsigned long)block_len; i++) + { + if (i == next_parity) + { + parity_len++; + next_parity <<= 1; + } + enc_len_count--; + if (enc_len_count == 0) + break; + } + } + return enc_len - parity_len; } -void BitsetExt::hamming_enc(int block_len) { - deque::iterator iter=bits.begin(); - while (iter != bits.end()) { - hamming_enc_block(block_len, iter); - } +void BitsetExt::hamming_enc(int block_len) +{ + deque::iterator iter = bits.begin(); + while (iter != bits.end()) + { + hamming_enc_block(block_len, iter); + } } // Returns number of corrected errors (or -1 if there were unrecoverable error) -int BitsetExt::hamming_dec(int block_len) { - int error_count=0; - deque::iterator iter=bits.begin(); - while (iter != bits.end()) { - int error=hamming_dec_block(block_len, iter); - if ((error == -1) || (error_count == -1)) error_count=-1; - else error_count += error; - } - return error_count; +int BitsetExt::hamming_dec(int block_len) +{ + int error_count = 0; + deque::iterator iter = bits.begin(); + while (iter != bits.end()) + { + int error = hamming_dec_block(block_len, iter); + if ((error == -1) || (error_count == -1)) + error_count = -1; + else + error_count += error; + } + return error_count; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Camera.cpp b/ar_track_alvar/src/Camera.cpp index e96f2f4..0fcc26f 100644 --- a/ar_track_alvar/src/Camera.cpp +++ b/ar_track_alvar/src/Camera.cpp @@ -25,878 +25,822 @@ #include "ar_track_alvar/Camera.h" #include "ar_track_alvar/FileFormatUtils.h" #include +#include +#include using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; - -void ProjPoints::Reset() { - object_points.clear(); - image_points.clear(); - point_counts.clear(); +void ProjPoints::Reset() +{ + object_points.clear(); + image_points.clear(); + point_counts.clear(); } // TODO: Does it matter what the etalon_square_size is??? 
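+// etalon_square_size is the edge length of one chessboard square, in whatever
+// unit the caller works in: the object points built below lie on a planar
+// z=0 grid scaled by it, so it fixes the metric scale of any calibration
+// derived from these correspondences. An illustrative call (parameter values
+// assumed, not from the original source) for a board with 8x6 inner corners
+// and 25 mm squares:
+//
+//   ProjPoints pp;
+//   pp.AddPointsUsingChessboard(image, 25.0, 8, 6, true);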
-bool ProjPoints::AddPointsUsingChessboard(IplImage *image, double etalon_square_size, int etalon_rows, int etalon_columns, bool visualize) {
-  if (image->width == 0) return false;
-  IplImage *gray = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
-  CvPoint2D32f *corners = new CvPoint2D32f[etalon_rows*etalon_columns];
-  if (image->nChannels == 1)
-    cvCopy(image, gray);
-  else
-    cvCvtColor(image, gray, CV_RGB2GRAY);
-  width = image->width;
-  height = image->height;
-
-  int point_count = 0;
-  int pattern_was_found = cvFindChessboardCorners(gray, cvSize(etalon_rows, etalon_columns), corners, &point_count);
-  if (!pattern_was_found) point_count=0;
-  if (point_count > 0) {
-    cvFindCornerSubPix(gray, corners, point_count, cvSize(5,5), cvSize(-1,-1),
-                       cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10, 0.01f));
-    for (int i=0; i<point_count; i++) {
-      CvPoint3D64f po;
-      CvPoint2D64f pi;
-      po.x = etalon_square_size*(i%etalon_rows);
-      po.y = etalon_square_size*(i/etalon_rows);
-      po.z = 0;
-      pi.x = corners[i].x;
-      pi.y = corners[i].y;
-      object_points.push_back(po);
-      image_points.push_back(pi);
-    }
-    point_counts.push_back(point_count);
-  }
-  if (visualize) {
-    cvDrawChessboardCorners(image, cvSize(etalon_rows, etalon_columns), corners, point_count, false);
-  }
-  delete [] corners;
-  cvReleaseImage(&gray);
-  if (point_count > 0) return true;
-  return false;
-}
-
-bool ProjPoints::AddPointsUsingMarkers(vector<PointDouble> &marker_corners, vector<PointDouble> &marker_corners_img, IplImage* image)
-{
-  width = image->width;
-  height = image->height;
-  if ((marker_corners.size() == marker_corners_img.size()) &&
-      (marker_corners.size() == 4))
-  {
-    for (size_t p=0; p<marker_corners.size(); p++) {
-      CvPoint3D64f po;
-      CvPoint2D64f pi;
-      po.x = marker_corners[p].x;
-      po.y = marker_corners[p].y;
-      po.z = 0;
-      pi.x = marker_corners_img[p].x;
-      pi.y = marker_corners_img[p].y;
-      object_points.push_back(po);
-      image_points.push_back(pi);
-    }
-    point_counts.push_back(marker_corners.size());
-  }
-  return true;
-}
+bool ProjPoints::AddPointsUsingChessboard(cv::Mat& image,
+                                          double etalon_square_size,
+                                          int etalon_rows, int etalon_columns,
+                                          bool visualize)
+{
+  if (image.cols == 0)
+    return false;
+
+  cv::Mat gray;
+  std::vector<cv::Point2f> corners;
+  if (image.channels() == 1)
+    image.copyTo(gray);
+  else
+    cv::cvtColor(image, gray, cv::COLOR_RGB2GRAY);
+
+  width = image.cols;
+  height = image.rows;
+
+  std::size_t point_count = 0;
+
+  bool pattern_was_found = cv::findChessboardCorners(
+      gray, cv::Size(etalon_rows, etalon_columns), corners);
+  point_count = !pattern_was_found ? 0 : corners.size();
+  if (point_count > 0)
+  {
+    cv::cornerSubPix(
+        gray, corners, cv::Size(5, 5), cv::Size(-1, -1),
+        cv::TermCriteria(cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 10,
+                         0.01f));
+    for (std::size_t i = 0; i < point_count; i++)
+    {
+      cv::Point3d po;
+      cv::Point2d pi;
+      po.x = etalon_square_size * (i % etalon_rows);
+      po.y = etalon_square_size * (i / etalon_rows);
+      po.z = 0;
+      pi.x = corners[i].x;
+      pi.y = corners[i].y;
+      object_points.push_back(po);
+      image_points.push_back(pi);
+    }
+    point_counts.push_back(point_count);
+  }
+  if (visualize)
+  {
+    cv::drawChessboardCorners(image, cv::Size(etalon_rows, etalon_columns),
+                              corners, false);
+  }
+  corners.clear();
+  gray.release();
+  if (point_count > 0)
+    return true;
+  return false;
+}
 
-void Camera::SetSimpleCalib(int _x_res, int _y_res, double f_fac)
+bool ProjPoints::AddPointsUsingMarkers(vector<PointDouble>& marker_corners,
+                                       vector<PointDouble>& marker_corners_img,
+                                       cv::Mat& image)
 {
-  memset(calib_K_data,0,sizeof(double)*3*3);
-  memset(calib_D_data,0,sizeof(double)*4);
-  calib_K_data[0][0] = _x_res*f_fac; // Just some focal length by default
-  calib_K_data[1][1] = _x_res*f_fac; // Just some focal length by default
-  calib_K_data[0][2] = _x_res/2;
-  calib_K_data[1][2] = _y_res/2;
-  calib_K_data[2][2] = 1;
-  calib_x_res = _x_res;
-  calib_y_res = _y_res;
-}
-
-bool Camera::LoadCalibXML(const char *calibfile) {
-  TiXmlDocument document;
-  if (!document.LoadFile(calibfile)) return false;
-  TiXmlElement *xml_root = document.RootElement();
-
-  return
-    xml_root->QueryIntAttribute("width", &calib_x_res) == TIXML_SUCCESS &&
-    xml_root->QueryIntAttribute("height", &calib_y_res) == TIXML_SUCCESS &&
-    FileFormatUtils::parseXMLMatrix(xml_root->FirstChildElement("intrinsic_matrix"), &calib_K) &&
-    FileFormatUtils::parseXMLMatrix(xml_root->FirstChildElement("distortion"), &calib_D);
-}
-
-bool Camera::LoadCalibOpenCV(const char *calibfile) {
-  cvSetErrMode(CV_ErrModeSilent);
-  CvFileStorage* fs = cvOpenFileStorage(calibfile, 0, CV_STORAGE_READ);
-  cvSetErrMode(CV_ErrModeLeaf);
-  if(fs){
-    CvFileNode* root_node = cvGetRootFileNode(fs);
-    // K Intrinsic
-    CvFileNode* intrinsic_mat_node = cvGetFileNodeByName(fs, root_node, "intrinsic_matrix");
-    CvMat* intrinsic_mat = reinterpret_cast<CvMat*>(cvRead(fs, intrinsic_mat_node));
-    cvmSet(&calib_K, 0, 0, cvmGet(intrinsic_mat, 0, 0));
-    cvmSet(&calib_K, 0, 1, cvmGet(intrinsic_mat, 0, 1));
-    cvmSet(&calib_K, 0, 2, cvmGet(intrinsic_mat, 0, 2));
-    cvmSet(&calib_K, 1, 0, cvmGet(intrinsic_mat, 1, 0));
-    cvmSet(&calib_K, 1, 1, cvmGet(intrinsic_mat, 1, 1));
-    cvmSet(&calib_K, 1, 2, cvmGet(intrinsic_mat, 1, 2));
-    cvmSet(&calib_K, 2, 0, cvmGet(intrinsic_mat, 2, 0));
-    cvmSet(&calib_K, 2, 1, cvmGet(intrinsic_mat, 2, 1));
-    cvmSet(&calib_K, 2, 2, cvmGet(intrinsic_mat, 2, 2));
-
-    // D Distortion
-    CvFileNode* dist_mat_node = cvGetFileNodeByName(fs, root_node, "distortion");
-    CvMat* dist_mat = reinterpret_cast<CvMat*>(cvRead(fs, dist_mat_node));
-    cvmSet(&calib_D, 0, 0, cvmGet(dist_mat, 0, 0));
-    cvmSet(&calib_D, 1, 0, cvmGet(dist_mat, 1, 0));
-    cvmSet(&calib_D, 2, 0, cvmGet(dist_mat, 2, 0));
-    cvmSet(&calib_D, 3, 0, cvmGet(dist_mat, 3, 0));
-
-    // Resolution
-    CvFileNode* width_node = cvGetFileNodeByName(fs, root_node, "width");
-    CvFileNode* height_node = cvGetFileNodeByName(fs, root_node, "height");
-    calib_x_res = width_node->data.i;
-    calib_y_res = height_node->data.i;
-    cvReleaseFileStorage(&fs);
-    return true;
-  }
-  // reset error status
-  cvSetErrStatus(CV_StsOk);
-  return false;
-}
-
-void Camera::SetCameraInfo(const sensor_msgs::CameraInfo &camInfo)
-{
-  cam_info_ = camInfo;
-
-  calib_x_res = cam_info_.width;
-  calib_y_res = cam_info_.height;
-  x_res = calib_x_res;
-  y_res = calib_y_res;
-
-  cvmSet(&calib_K, 0, 0, cam_info_.K[0]);
-  cvmSet(&calib_K, 0, 1, cam_info_.K[1]);
-  cvmSet(&calib_K, 0, 2, cam_info_.K[2]);
-  cvmSet(&calib_K, 1, 0, cam_info_.K[3]);
-  cvmSet(&calib_K, 1, 1, cam_info_.K[4]);
-  cvmSet(&calib_K, 1, 2, cam_info_.K[5]);
-  cvmSet(&calib_K, 2, 0, cam_info_.K[6]);
-  cvmSet(&calib_K, 2, 1, cam_info_.K[7]);
-  cvmSet(&calib_K, 2, 2, cam_info_.K[8]);
-
-  if (cam_info_.D.size() >= 4) {
-    cvmSet(&calib_D, 0, 0, cam_info_.D[0]);
-    cvmSet(&calib_D, 1, 0, cam_info_.D[1]);
-    cvmSet(&calib_D, 2, 0, cam_info_.D[2]);
-    cvmSet(&calib_D, 3, 0, cam_info_.D[3]);
-  } else {
-    cvmSet(&calib_D, 0, 0, 0);
-    cvmSet(&calib_D, 1, 0, 0);
-    cvmSet(&calib_D, 2, 0, 0);
-    cvmSet(&calib_D, 3, 0, 0);
+  width = image.cols;
+  height = image.rows;
+  if ((marker_corners.size() == marker_corners_img.size()) &&
+      (marker_corners.size() == 4))
+  {
+    for (size_t p = 0; p < marker_corners.size(); p++)
+    {
+      cv::Point3d po;
+      cv::Point2d pi;
+      po.x = marker_corners[p].x;
+      po.y = marker_corners[p].y;
+      po.z = 0;
+      pi.x = marker_corners_img[p].x;
+      pi.y = marker_corners_img[p].y;
+      object_points.push_back(po);
+      image_points.push_back(pi);
    }
+    point_counts.push_back(marker_corners.size());
+  }
+
+  return true;
 }
 
-void Camera::camInfoCallback (const sensor_msgs::CameraInfoConstPtr & cam_info)
+Camera::Camera()
+{
+  calib_K = cv::Mat(3, 3, CV_64F, calib_K_data);
+  calib_D = cv::Mat(4, 1, CV_64F, calib_D_data);
+  memset(calib_K_data, 0, sizeof(double) * 3 * 3);
+  memset(calib_D_data, 0, sizeof(double) * 4);
+  calib_K_data[0][0] = 550;  // Just some focal length by default
+  calib_K_data[1][1] = 550;  // Just some focal length by default
+  calib_K_data[0][2] = 320;
+  calib_K_data[1][2] = 240;
+  calib_K_data[2][2] = 1;
+  calib_x_res = 640;
+  calib_y_res = 480;
+  x_res = 640;
+  y_res = 480;
+  getCamInfo_ = false;
+}
+
+void Camera::SetSimpleCalib(int _x_res, int _y_res, double f_fac)
+{
+  memset(calib_K_data, 0, sizeof(double) * 3 * 3);
+  memset(calib_D_data, 0, sizeof(double) * 4);
+  calib_K_data[0][0] = _x_res * f_fac;  // Just some focal length by default
+  calib_K_data[1][1] = _x_res * f_fac;  // Just some focal length by default
+  calib_K_data[0][2] = _x_res / 2;
+  calib_K_data[1][2] = _y_res / 2;
+  calib_K_data[2][2] = 1;
+  calib_x_res = _x_res;
+  calib_y_res = _y_res;
+}
+
+bool Camera::LoadCalibXML(const char* calibfile)
+{
+  TiXmlDocument document;
+  if (!document.LoadFile(calibfile))
+    return false;
+  TiXmlElement* xml_root = document.RootElement();
+
+  return xml_root->QueryIntAttribute("width", &calib_x_res) == TIXML_SUCCESS &&
+         xml_root->QueryIntAttribute("height", &calib_y_res) == TIXML_SUCCESS &&
+         FileFormatUtils::parseXMLMatrix(xml_root->FirstChildElement("intrinsic"
+                                                                     "_matrix"),
+                                         calib_K) &&
+         FileFormatUtils::parseXMLMatrix(xml_root->FirstChildElement("distortio"
+                                                                     "n"),
+                                         calib_D);
+}
+
+bool Camera::LoadCalibOpenCV(const char* calibfile)
+{
+  cv::FileStorage fs;
+  bool success = fs.open(calibfile, cv::FileStorage::READ);
+
+  if (success)
+  {
+    // K Intrinsic
+    cv::FileNode intrinsic_mat_node = fs["intrinsic_matrix"];
+    cv::Mat intrinsic_mat;
+    cv::read(intrinsic_mat_node, intrinsic_mat);
+    calib_K.at<double>(0, 0) = intrinsic_mat.at<double>(0, 0);
+    calib_K.at<double>(0, 1) = intrinsic_mat.at<double>(0, 1);
+    calib_K.at<double>(0, 2) = intrinsic_mat.at<double>(0, 2);
+    calib_K.at<double>(1, 0) = intrinsic_mat.at<double>(1, 0);
+    calib_K.at<double>(1, 1) = intrinsic_mat.at<double>(1, 1);
+    calib_K.at<double>(1, 2) = intrinsic_mat.at<double>(1, 2);
+    calib_K.at<double>(2, 0) = intrinsic_mat.at<double>(2, 0);
+    calib_K.at<double>(2, 1) = intrinsic_mat.at<double>(2, 1);
+    calib_K.at<double>(2, 2) = intrinsic_mat.at<double>(2, 2);
+
+    // D Distortion
+    cv::FileNode dist_mat_node = fs["distortion"];
+    cv::Mat dist_mat;
+    cv::read(dist_mat_node, dist_mat);
+    calib_D.at<double>(0, 0) = dist_mat.at<double>(0, 0);
+    calib_D.at<double>(1, 0) = dist_mat.at<double>(1, 0);
+    calib_D.at<double>(2, 0) = dist_mat.at<double>(2, 0);
+    calib_D.at<double>(3, 0) = dist_mat.at<double>(3, 0);
+
+    // Resolution
+    cv::FileNode width_node = fs["width"];
+    cv::FileNode height_node = fs["height"];
+    cv::read(width_node, calib_x_res, 0);
+    cv::read(height_node, calib_y_res, 0);
+    fs.release();
+    return true;
+  }
+  return false;
+}
+
+void Camera::SetCameraInfo(const sensor_msgs::msg::CameraInfo::SharedPtr cam_info)
+{
+  calib_x_res = cam_info->width;
+  calib_y_res = cam_info->height;
+  x_res = calib_x_res;
+  y_res = calib_y_res;
+
+  calib_K.at<double>(0, 0) = cam_info->k[0];
+  calib_K.at<double>(0, 1) = cam_info->k[1];
+  calib_K.at<double>(0, 2) = cam_info->k[2];
+  calib_K.at<double>(1, 0) = cam_info->k[3];
+  calib_K.at<double>(1, 1) = cam_info->k[4];
+  calib_K.at<double>(1, 2) = cam_info->k[5];
+  calib_K.at<double>(2, 0) = cam_info->k[6];
+  calib_K.at<double>(2, 1) = cam_info->k[7];
+  calib_K.at<double>(2, 2) = cam_info->k[8];
+
+  if (cam_info->d.size() >= 4)
   {
-    if (!getCamInfo_)
+    calib_D.at<double>(0, 0) = cam_info->d[0];
+    calib_D.at<double>(1, 0) = cam_info->d[1];
+    calib_D.at<double>(2, 0) = cam_info->d[2];
+    calib_D.at<double>(3, 0) = cam_info->d[3];
+  }
+  else
+  {
+    calib_D.at<double>(0, 0) = 0;
+    calib_D.at<double>(1, 0) = 0;
+    calib_D.at<double>(2, 0) = 0;
+    calib_D.at<double>(3, 0) = 0;
+  }
+}
+
+bool Camera::SetCalib(const char* calibfile, int _x_res, int _y_res,
+                      FILE_FORMAT format)
+{
+  x_res = _x_res;
+  y_res = _y_res;
+  if (!calibfile)
+    return false;
+
+  bool success = false;
+  switch (format)
+  {
+    case FILE_FORMAT_XML:
+      success = LoadCalibXML(calibfile);
+      break;
+    case FILE_FORMAT_OPENCV:
+    case FILE_FORMAT_DEFAULT:
+      success = LoadCalibOpenCV(calibfile);
+      break;
+    default:
+      // TODO: throw exception?
+      break;
+  };
+
+  if (success)
+  {
+    // Scale matrix in case of different resolution calibration.
+    // The OpenCV documentation says:
+    // - If an image from camera is up-sampled/down-sampled by some factor, all
+    //   intrinsic camera parameters
+    //   (fx, fy, cx and cy) should be scaled (multiplied/divided, respectively)
+    //   by the same factor.
+    // - The distortion coefficients remain the same regardless of the captured
+    //   image resolution.
+    if ((calib_x_res != x_res) || (calib_y_res != y_res))
     {
-      SetCameraInfo(*cam_info);
-      getCamInfo_ = true;
-      sub_.shutdown();
+      calib_K_data[0][0] *= (double(x_res) / double(calib_x_res));
+      calib_K_data[0][2] *= (double(x_res) / double(calib_x_res));
+      calib_K_data[1][1] *= (double(y_res) / double(calib_y_res));
+      calib_K_data[1][2] *= (double(y_res) / double(calib_y_res));
     }
   }
+  return success;
+}
+
+bool Camera::SaveCalibXML(const char* calibfile)
+{
+  TiXmlDocument document;
+  document.LinkEndChild(new TiXmlDeclaration("1.0", "UTF-8", "no"));
+  document.LinkEndChild(new TiXmlElement("camera"));
+  TiXmlElement* xml_root = document.RootElement();
+  xml_root->SetAttribute("width", calib_x_res);
+  xml_root->SetAttribute("height", calib_y_res);
+  xml_root->LinkEndChild(
+      FileFormatUtils::createXMLMatrix("intrinsic_matrix", calib_K));
+  xml_root->LinkEndChild(
+      FileFormatUtils::createXMLMatrix("distortion", calib_D));
+  return document.SaveFile(calibfile);
+}
+
+bool Camera::SaveCalibOpenCV(const char* calibfile)
+{
+  cv::FileStorage fs;
+  bool success = fs.open(calibfile, cv::FileStorage::WRITE);
+  if (success)
+  {
+    cv::write(fs, "intrinsic_matrix", calib_K);
+    cv::write(fs, "distortion", calib_D);
+    cv::write(fs, "width", calib_x_res);
+    cv::write(fs, "height", calib_y_res);
+    fs.release();
+    return true;
+  }
+  return false;
+}
+
+bool Camera::SaveCalib(const char* calibfile, FILE_FORMAT format)
+{
+  if (!calibfile)
+    return false;
+
+  switch (format)
+  {
+    case FILE_FORMAT_XML:
+      return SaveCalibXML(calibfile);
+    case FILE_FORMAT_OPENCV:
+    case FILE_FORMAT_DEFAULT:
+      return SaveCalibOpenCV(calibfile);
+    default:
+      return false;
+  };
+}
+
+void Camera::Calibrate(ProjPoints& pp)
+{
+  std::vector<std::vector<cv::Point3d>> object_points;
+  std::vector<std::vector<cv::Point2d>> image_points;
+  object_points.emplace_back();
+  for (const auto& point : pp.object_points)
+  {
+    object_points[0].push_back(point);
+  }
+  image_points.emplace_back();
+  for (const auto& point : pp.image_points)
+  {
+    image_points[0].push_back(point);
+  }
+  cv::calibrateCamera(object_points, image_points,
+                      cv::Size(pp.width, pp.height), calib_K, calib_D,
+                      cv::Mat(), cv::Mat(), cv::CALIB_USE_INTRINSIC_GUESS);
 
-bool Camera::SetCalib(const char *calibfile, int _x_res, int _y_res, FILE_FORMAT format) {
-  x_res = _x_res;
-  y_res = _y_res;
-  if(!calibfile) return false;
-
-  bool success = false;
-  switch (format) {
-  case FILE_FORMAT_XML:
-    success = LoadCalibXML(calibfile);
-    break;
-  case FILE_FORMAT_OPENCV:
-  case FILE_FORMAT_DEFAULT:
-    success = LoadCalibOpenCV(calibfile);
-    break;
-  default:
-    // TODO: throw exception?
-    break;
-  };
-
-  if (success) {
-    // Scale matrix in case of different resolution calibration.
-    // The OpenCV documentation says:
-    // - If an image from camera is up-sampled/down-sampled by some factor, all intrinsic camera parameters
-    //   (fx, fy, cx and cy) should be scaled (multiplied/divided, respectively) by the same factor.
-    // - The distortion coefficients remain the same regardless of the captured image resolution.
-    if ((calib_x_res != x_res) || (calib_y_res != y_res)) {
-      calib_K_data[0][0] *= (double(x_res)/double(calib_x_res));
-      calib_K_data[0][2] *= (double(x_res)/double(calib_x_res));
-      calib_K_data[1][1] *= (double(y_res)/double(calib_y_res));
-      calib_K_data[1][2] *= (double(y_res)/double(calib_y_res));
-    }
-  }
-  return success;
-}
-
-bool Camera::SaveCalibXML(const char *calibfile) {
-  TiXmlDocument document;
-  document.LinkEndChild(new TiXmlDeclaration("1.0", "UTF-8", "no"));
-  document.LinkEndChild(new TiXmlElement("camera"));
-  TiXmlElement *xml_root = document.RootElement();
-  xml_root->SetAttribute("width", calib_x_res);
-  xml_root->SetAttribute("height", calib_y_res);
-  xml_root->LinkEndChild(FileFormatUtils::createXMLMatrix("intrinsic_matrix", &calib_K));
-  xml_root->LinkEndChild(FileFormatUtils::createXMLMatrix("distortion", &calib_D));
-  return document.SaveFile(calibfile);
-}
-
-bool Camera::SaveCalibOpenCV(const char *calibfile) {
-  cvSetErrMode(CV_ErrModeSilent);
-  CvFileStorage* fs = cvOpenFileStorage(calibfile, 0, CV_STORAGE_WRITE);
-  cvSetErrMode(CV_ErrModeLeaf);
-  if(fs){
-    cvWrite(fs, "intrinsic_matrix", &calib_K, cvAttrList(0,0));
-    cvWrite(fs, "distortion", &calib_D, cvAttrList(0,0));
-    //cvWriteReal(fs, "fov_x", data.fov_x);
-    //cvWriteReal(fs, "fov_y", data.fov_y);
-    cvWriteInt(fs, "width", calib_x_res);
-    cvWriteInt(fs, "height", calib_y_res);
-    cvReleaseFileStorage(&fs);
-    return true;
-  }
-  // reset error status
-  cvSetErrStatus(CV_StsOk);
-  return false;
-}
-
-bool Camera::SaveCalib(const char *calibfile, FILE_FORMAT format) {
-  if(!calibfile)
-    return false;
-
-  switch (format) {
-  case FILE_FORMAT_XML:
-    return SaveCalibXML(calibfile);
-  case FILE_FORMAT_OPENCV:
-  case FILE_FORMAT_DEFAULT:
-    return SaveCalibOpenCV(calibfile);
-  default:
-    return false;
-  };
-}
-
-void Camera::Calibrate(ProjPoints &pp)
-{
-  CvMat *object_points = cvCreateMat((int)pp.object_points.size(), 1, CV_32FC3);
-  CvMat *image_points = cvCreateMat((int)pp.image_points.size(), 1, CV_32FC2);
-  const CvMat point_counts = cvMat((int)pp.point_counts.size(), 1, CV_32SC1, &pp.point_counts[0]);
-  for (size_t i=0; i<pp.object_points.size(); i++) {
-    object_points->data.fl[i*3+0] = (float)pp.object_points[i].x;
-    object_points->data.fl[i*3+1] = (float)pp.object_points[i].y;
-    object_points->data.fl[i*3+2] = (float)pp.object_points[i].z;
-    image_points->data.fl[i*2+0] = (float)pp.image_points[i].x;
-    image_points->data.fl[i*2+1] = (float)pp.image_points[i].y;
-  }
-  cvCalibrateCamera2(object_points, image_points, &point_counts,
-                     cvSize(pp.width, pp.height),
-                     &calib_K, &calib_D, 0, 0, CV_CALIB_USE_INTRINSIC_GUESS);
-
-  calib_x_res = pp.width;
-  calib_y_res = pp.height;
-
-  cvReleaseMat(&object_points);
-  cvReleaseMat(&image_points);
-}
-
-void Camera::SetRes(int _x_res, int _y_res) {
-  x_res = _x_res;
-  y_res = _y_res;
-  // Scale matrix in case of different resolution calibration.
-  // The OpenCV documentation says:
-  // - If an image from camera is up-sampled/down-sampled by some factor, all intrinsic camera parameters
-  //   (fx, fy, cx and cy) should be scaled (multiplied/divided, respectively) by the same factor.
-  // - The distortion coefficients remain the same regardless of the captured image resolution.
- if ((calib_x_res != x_res) || (calib_y_res != y_res)) { - calib_K_data[0][0] *= (double(x_res)/double(calib_x_res)); - calib_K_data[0][2] *= (double(x_res)/double(calib_x_res)); - calib_K_data[1][1] *= (double(y_res)/double(calib_y_res)); - calib_K_data[1][2] *= (double(y_res)/double(calib_y_res)); - } + calib_x_res = pp.width; + calib_y_res = pp.height; +} + +void Camera::SetRes(int _x_res, int _y_res) +{ + x_res = _x_res; + y_res = _y_res; + // Scale matrix in case of different resolution calibration. + // The OpenCV documentation says: + // - If an image from camera is up-sampled/down-sampled by some factor, all + // intrinsic camera parameters + // (fx, fy, cx and cy) should be scaled (multiplied/divided, respectively) + // by the same factor. + // - The distortion coefficients remain the same regardless of the captured + // image resolution. + if ((calib_x_res != x_res) || (calib_y_res != y_res)) + { + calib_K_data[0][0] *= (double(x_res) / double(calib_x_res)); + calib_K_data[0][2] *= (double(x_res) / double(calib_x_res)); + calib_K_data[1][1] *= (double(y_res) / double(calib_y_res)); + calib_K_data[1][2] *= (double(y_res) / double(calib_y_res)); + } } // TODO: Better approach for this... // Note, the proj_matrix[8] is now negated. This is due to the fact // that with OpenCV and OpenGL projection matrices both y and z -// should be mirrored. All other components are -void Camera::GetOpenglProjectionMatrix(double proj_matrix[16], const int width, const int height, const float far_clip /*= 1000.0f*/, const float near_clip /*= 0.1f*/) { - proj_matrix[0] = 2.0f * calib_K_data[0][0] / float(width); - proj_matrix[1] = 0; - proj_matrix[2] = 0; - proj_matrix[3] = 0; - proj_matrix[4] = 2.0f * calib_K_data[0][1] / float(width); // skew - proj_matrix[5] = 2.0f * calib_K_data[1][1] / float(height); - proj_matrix[6] = 0; - proj_matrix[7] = 0; - //proj_matrix[8] = (2.0f * calib_K_data[0][2] / float(width)) - 1.0f; - proj_matrix[8] = -(2.0f * calib_K_data[0][2] / float(width)) + 1.0f; - proj_matrix[9] = (2.0f * calib_K_data[1][2] / float(height)) - 1.0f; - proj_matrix[10] = -(far_clip + near_clip)/(far_clip - near_clip); - proj_matrix[11] = -1.0f; - proj_matrix[12] = 0; - proj_matrix[13] = 0; - proj_matrix[14] = -2.0f * far_clip * near_clip / (far_clip - near_clip); - proj_matrix[15] = 0; -} - -void Camera::SetOpenglProjectionMatrix(double proj_matrix[16], const int width, const int height) { - x_res = width; - y_res = height; - calib_x_res = width; - calib_y_res = height; - calib_K_data[0][0] = proj_matrix[0] * float(width) / 2.0f; - calib_K_data[0][1] = proj_matrix[4] * float(width) / 2.0f; - calib_K_data[1][1] = proj_matrix[5] * float(height) / 2.0f; - //calib_K_data[0][2] = (proj_matrix[8] + 1.0f) * float(width) / 2.0f; - calib_K_data[0][2] = (-proj_matrix[8] + 1.0f) * float(width) / 2.0f; // Is this ok? - calib_K_data[1][2] = (proj_matrix[9] + 1.0f) * float(height) / 2.0f; - calib_K_data[2][2] = 1; -} - -void Camera::Undistort(PointDouble &point) +// should be mirrored. 
All other components are +void Camera::GetOpenglProjectionMatrix(double proj_matrix[16], const int width, + const int height, + const float far_clip /*= 1000.0f*/, + const float near_clip /*= 0.1f*/) { -/* - // focal length - double ifx = 1./cvmGet(&calib_K, 0, 0); - double ify = 1./cvmGet(&calib_K, 1, 1); - - // principal point - double cx = cvmGet(&calib_K, 0, 2); - double cy = cvmGet(&calib_K, 1, 2); - - // distortion coeffs - double* k = calib_D.data.db; - - // compensate distortion iteratively - double x = (point.x - cx)*ifx, y = (point.y - cy)*ify, x0 = x, y0 = y; - for(int j = 0; j < 5; j++){ - double r2 = x*x + y*y; - double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2); - double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); - double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; - x = (x0 - delta_x)*icdist; - y = (y0 - delta_y)*icdist; - } - // apply compensation - point.x = x/ifx + cx; - point.y = y/ify + cy; -*/ + proj_matrix[0] = 2.0f * calib_K_data[0][0] / float(width); + proj_matrix[1] = 0; + proj_matrix[2] = 0; + proj_matrix[3] = 0; + proj_matrix[4] = 2.0f * calib_K_data[0][1] / float(width); // skew + proj_matrix[5] = 2.0f * calib_K_data[1][1] / float(height); + proj_matrix[6] = 0; + proj_matrix[7] = 0; + // proj_matrix[8] = (2.0f * calib_K_data[0][2] / float(width)) - 1.0f; + proj_matrix[8] = -(2.0f * calib_K_data[0][2] / float(width)) + 1.0f; + proj_matrix[9] = (2.0f * calib_K_data[1][2] / float(height)) - 1.0f; + proj_matrix[10] = -(far_clip + near_clip) / (far_clip - near_clip); + proj_matrix[11] = -1.0f; + proj_matrix[12] = 0; + proj_matrix[13] = 0; + proj_matrix[14] = -2.0f * far_clip * near_clip / (far_clip - near_clip); + proj_matrix[15] = 0; +} + +void Camera::SetOpenglProjectionMatrix(double proj_matrix[16], const int width, + const int height) +{ + x_res = width; + y_res = height; + calib_x_res = width; + calib_y_res = height; + calib_K_data[0][0] = proj_matrix[0] * float(width) / 2.0f; + calib_K_data[0][1] = proj_matrix[4] * float(width) / 2.0f; + calib_K_data[1][1] = proj_matrix[5] * float(height) / 2.0f; + // calib_K_data[0][2] = (proj_matrix[8] + 1.0f) * float(width) / 2.0f; + calib_K_data[0][2] = + (-proj_matrix[8] + 1.0f) * float(width) / 2.0f; // Is this ok? 
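+  // Seems fine: GetOpenglProjectionMatrix above stores proj_matrix[8]
+  // negated, so the principal point is recovered as
+  // cx = (1 - proj_matrix[8]) * width / 2, which is exactly this expression.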
+ calib_K_data[1][2] = (proj_matrix[9] + 1.0f) * float(height) / 2.0f; + calib_K_data[2][2] = 1; +} + +void Camera::Undistort(PointDouble& point) +{ + /* + // focal length + double ifx = 1./cvmGet(&calib_K, 0, 0); + double ify = 1./cvmGet(&calib_K, 1, 1); + + // principal point + double cx = cvmGet(&calib_K, 0, 2); + double cy = cvmGet(&calib_K, 1, 2); + + // distortion coeffs + double* k = calib_D.data.db; + + // compensate distortion iteratively + double x = (point.x - cx)*ifx, y = (point.y - cy)*ify, x0 = x, y0 = y; + for(int j = 0; j < 5; j++){ + double r2 = x*x + y*y; + double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2); + double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); + double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; + x = (x0 - delta_x)*icdist; + y = (y0 - delta_y)*icdist; + } + // apply compensation + point.x = x/ifx + cx; + point.y = y/ify + cy; + */ } -void Camera::Undistort(vector& points) +void Camera::Undistort(vector& points) { -/* - // focal length - double ifx = 1./cvmGet(&calib_K, 0, 0); - double ify = 1./cvmGet(&calib_K, 1, 1); - - // principal point - double cx = cvmGet(&calib_K, 0, 2); - double cy = cvmGet(&calib_K, 1, 2); - - // distortion coeffs - double* k = calib_D.data.db; - - for(unsigned int i = 0; i < points.size(); i++) - { - // compensate distortion iteratively - double x = (points[i].x - cx)*ifx, y = (points[i].y - cy)*ify, x0 = x, y0 = y; - for(int j = 0; j < 5; j++){ - double r2 = x*x + y*y; - double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2); - double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); - double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; - x = (x0 - delta_x)*icdist; - y = (y0 - delta_y)*icdist; - } - // apply compensation - points[i].x = x/ifx + cx; - points[i].y = y/ify + cy; - } -*/ + /* + // focal length + double ifx = 1./cvmGet(&calib_K, 0, 0); + double ify = 1./cvmGet(&calib_K, 1, 1); + + // principal point + double cx = cvmGet(&calib_K, 0, 2); + double cy = cvmGet(&calib_K, 1, 2); + + // distortion coeffs + double* k = calib_D.data.db; + + for(unsigned int i = 0; i < points.size(); i++) + { + // compensate distortion iteratively + double x = (points[i].x - cx)*ifx, y = (points[i].y - cy)*ify, x0 = x, y0 + = y; for(int j = 0; j < 5; j++){ double r2 = x*x + y*y; double icdist + = 1./(1 + k[0]*r2 + k[1]*r2*r2); double delta_x = 2*k[2]*x*y + k[3]*(r2 + + 2*x*x); double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; x = (x0 - + delta_x)*icdist; y = (y0 - delta_y)*icdist; + } + // apply compensation + points[i].x = x/ifx + cx; + points[i].y = y/ify + cy; + } + */ } -void Camera::Undistort(CvPoint2D32f& point) +void Camera::Undistort(cv::Point2f& point) { -/* - // focal length - double ifx = 1./cvmGet(&calib_K, 0, 0); - double ify = 1./cvmGet(&calib_K, 1, 1); - - // principal point - double cx = cvmGet(&calib_K, 0, 2); - double cy = cvmGet(&calib_K, 1, 2); - - // distortion coeffs - double* k = calib_D.data.db; - - - // compensate distortion iteratively - double x = (point.x - cx)*ifx, y = (point.y - cy)*ify, x0 = x, y0 = y; - for(int j = 0; j < 5; j++){ - double r2 = x*x + y*y; - double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2); - double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); - double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; - x = (x0 - delta_x)*icdist; - y = (y0 - delta_y)*icdist; - } - // apply compensation - point.x = float(x/ifx + cx); - point.y = float(y/ify + cy); -*/ + /* + // focal length + double ifx = 1./cvmGet(&calib_K, 0, 0); + double ify = 1./cvmGet(&calib_K, 1, 1); + + // principal point + double cx = cvmGet(&calib_K, 0, 2); + double cy = 
cvmGet(&calib_K, 1, 2); + + // distortion coeffs + double* k = calib_D.data.db; + + + // compensate distortion iteratively + double x = (point.x - cx)*ifx, y = (point.y - cy)*ify, x0 = x, y0 = y; + for(int j = 0; j < 5; j++){ + double r2 = x*x + y*y; + double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2); + double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); + double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; + x = (x0 - delta_x)*icdist; + y = (y0 - delta_y)*icdist; + } + // apply compensation + point.x = float(x/ifx + cx); + point.y = float(y/ify + cy); + */ } /* - template - void Undistort(PointType& point) { - // focal length - double ifx = 1./cvmGet(&calib_K, 0, 0); - double ify = 1./cvmGet(&calib_K, 1, 1); - - // principal point - double cx = cvmGet(&calib_K, 0, 2); - double cy = cvmGet(&calib_K, 1, 2); - - // distortion coeffs - double* k = calib_D.data.db; - - // compensate distortion iteratively - double x = (point.x - cx)*ifx, y = (point.y - cy)*ify, x0 = x, y0 = y; - for(int j = 0; j < 5; j++){ - double r2 = x*x + y*y; - double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2); - double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); - double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; - x = (x0 - delta_x)*icdist; - y = (y0 - delta_y)*icdist; - } - // apply compensation - point.x = x/ifx + cx; - point.y = y/ify + cy; - } + template + void Undistort(PointType& point) { + // focal length + double ifx = 1./cvmGet(&calib_K, 0, 0); + double ify = 1./cvmGet(&calib_K, 1, 1); + + // principal point + double cx = cvmGet(&calib_K, 0, 2); + double cy = cvmGet(&calib_K, 1, 2); + + // distortion coeffs + double* k = calib_D.data.db; + + // compensate distortion iteratively + double x = (point.x - cx)*ifx, y = (point.y - cy)*ify, x0 = x, y0 = y; + for(int j = 0; j < 5; j++){ + double r2 = x*x + y*y; + double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2); + double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); + double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; + x = (x0 - delta_x)*icdist; + y = (y0 - delta_y)*icdist; + } + // apply compensation + point.x = x/ifx + cx; + point.y = y/ify + cy; + } */ -void Camera::Distort(vector& points) +void Camera::Distort(vector& points) { -/* - double u0 = cvmGet(&calib_K, 0, 2), v0 = cvmGet(&calib_K, 1, 2); // cx, cy - double fx = cvmGet(&calib_K, 0, 0), fy = cvmGet(&calib_K, 1, 1); - double _fx = 1./fx, _fy = 1./fy; - double* k = calib_D.data.db; - - double k1 = k[0], k2 = k[1]; - double p1 = k[2], p2 = k[3]; - - for(unsigned int i = 0; i < points.size(); i++) - { - // Distort - double y = (points[i].y - v0)*_fy; - double y2 = y*y; - double _2p1y = 2*p1*y; - double _3p1y2 = 3*p1*y2; - double p2y2 = p2*y2; - - double x = (points[i].x - u0)*_fx; - double x2 = x*x; - double r2 = x2 + y2; - double d = 1 + (k1 + k2*r2)*r2; - - points[i].x = fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0; - points[i].y = fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0; - } -*/ + /* + double u0 = cvmGet(&calib_K, 0, 2), v0 = cvmGet(&calib_K, 1, 2); // cx, cy + double fx = cvmGet(&calib_K, 0, 0), fy = cvmGet(&calib_K, 1, 1); + double _fx = 1./fx, _fy = 1./fy; + double* k = calib_D.data.db; + + double k1 = k[0], k2 = k[1]; + double p1 = k[2], p2 = k[3]; + + for(unsigned int i = 0; i < points.size(); i++) + { + // Distort + double y = (points[i].y - v0)*_fy; + double y2 = y*y; + double _2p1y = 2*p1*y; + double _3p1y2 = 3*p1*y2; + double p2y2 = p2*y2; + + double x = (points[i].x - u0)*_fx; + double x2 = x*x; + double r2 = x2 + y2; + double d = 1 + (k1 + k2*r2)*r2; + + points[i].x = fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0; + 
points[i].y = fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0;
+  }
+  */
 }
 
-void Camera::Distort(PointDouble & point)
+void Camera::Distort(PointDouble& point)
 {
-/*
-  double u0 = cvmGet(&calib_K, 0, 2), v0 = cvmGet(&calib_K, 1, 2); // cx, cy
-  double fx = cvmGet(&calib_K, 0, 0), fy = cvmGet(&calib_K, 1, 1);
-  double _fx = 1./fx, _fy = 1./fy;
-  double* k = calib_D.data.db;
-
-  double k1 = k[0], k2 = k[1];
-  double p1 = k[2], p2 = k[3];
-
-  // Distort
-  double y = (point.y - v0)*_fy;
-  double y2 = y*y;
-  double _2p1y = 2*p1*y;
-  double _3p1y2 = 3*p1*y2;
-  double p2y2 = p2*y2;
-
-  double x = (point.x - u0)*_fx;
-  double x2 = x*x;
-  double r2 = x2 + y2;
-  double d = 1 + (k1 + k2*r2)*r2;
-
-  point.x = fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0;
-  point.y = fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0;
-*/
+  /*
+  double u0 = cvmGet(&calib_K, 0, 2), v0 = cvmGet(&calib_K, 1, 2); // cx, cy
+  double fx = cvmGet(&calib_K, 0, 0), fy = cvmGet(&calib_K, 1, 1);
+  double _fx = 1./fx, _fy = 1./fy;
+  double* k = calib_D.data.db;
+
+  double k1 = k[0], k2 = k[1];
+  double p1 = k[2], p2 = k[3];
+
+  // Distort
+  double y = (point.y - v0)*_fy;
+  double y2 = y*y;
+  double _2p1y = 2*p1*y;
+  double _3p1y2 = 3*p1*y2;
+  double p2y2 = p2*y2;
+
+  double x = (point.x - u0)*_fx;
+  double x2 = x*x;
+  double r2 = x2 + y2;
+  double d = 1 + (k1 + k2*r2)*r2;
+
+  point.x = fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0;
+  point.y = fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0;
+  */
 }
 
-void Camera::Distort(CvPoint2D32f & point)
+void Camera::Distort(cv::Point2f& point)
 {
-/*
-  double u0 = cvmGet(&calib_K, 0, 2), v0 = cvmGet(&calib_K, 1, 2); // cx, cy
-  double fx = cvmGet(&calib_K, 0, 0), fy = cvmGet(&calib_K, 1, 1);
-  double _fx = 1./fx, _fy = 1./fy;
-  double* k = calib_D.data.db;
-
-  double k1 = k[0], k2 = k[1];
-  double p1 = k[2], p2 = k[3];
-
-  // Distort
-  double y = (point.y - v0)*_fy;
-  double y2 = y*y;
-  double _2p1y = 2*p1*y;
-  double _3p1y2 = 3*p1*y2;
-  double p2y2 = p2*y2;
-
-  double x = (point.x - u0)*_fx;
-  double x2 = x*x;
-  double r2 = x2 + y2;
-  double d = 1 + (k1 + k2*r2)*r2;
-
-  point.x = float(fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0);
-  point.y = float(fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0);
-*/
+  /*
+  double u0 = cvmGet(&calib_K, 0, 2), v0 = cvmGet(&calib_K, 1, 2); // cx, cy
+  double fx = cvmGet(&calib_K, 0, 0), fy = cvmGet(&calib_K, 1, 1);
+  double _fx = 1./fx, _fy = 1./fy;
+  double* k = calib_D.data.db;
+
+  double k1 = k[0], k2 = k[1];
+  double p1 = k[2], p2 = k[3];
+
+  // Distort
+  double y = (point.y - v0)*_fy;
+  double y2 = y*y;
+  double _2p1y = 2*p1*y;
+  double _3p1y2 = 3*p1*y2;
+  double p2y2 = p2*y2;
+
+  double x = (point.x - u0)*_fx;
+  double x2 = x*x;
+  double r2 = x2 + y2;
+  double d = 1 + (k1 + k2*r2)*r2;
+
+  point.x = float(fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0);
+  point.y = float(fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0);
+  */
+}
+
+void Camera::CalcExteriorOrientation(const vector<cv::Point3d>& pw,
+                                     const vector<PointDouble>& pi,
+                                     cv::Mat& rodriques, cv::Mat& tra) const
+{
+  vector<cv::Point2d> pi2;
+  for (const auto& point : pi)
+  {
+    pi2.push_back(point);
+  }
+
+  tra.setTo(cv::Scalar::all(0));
+  rodriques.setTo(cv::Scalar::all(0));
+  cv::solvePnP(pw, pi2, calib_K, cv::Mat(), rodriques, tra, false,
+               cv::SOLVEPNP_ITERATIVE);
+}
+
+void Camera::CalcExteriorOrientation(const vector<PointDouble>& pw,
+                                     const vector<PointDouble>& pi,
+                                     cv::Mat& rodriques, cv::Mat& tra) const
+{
+  vector<cv::Point3d> pw3;
+  for (const auto& point : pw)
+  {
+    pw3.emplace_back(cv::Point3d(point.x, point.y, 0));
+  }
+
+  CalcExteriorOrientation(pw3, pi, rodriques, tra);
+}
+
+void Camera::CalcExteriorOrientation(const vector<PointDouble>& pw,
+                                     const vector<PointDouble>& pi,
+                                     Pose* pose) const
+{
+  cv::Mat ext_rodriques_mat = cv::Mat(3, 1, CV_64F);
+  cv::Mat ext_translate_mat = cv::Mat(3, 1, CV_64F);
+  CalcExteriorOrientation(pw, pi, ext_rodriques_mat, ext_translate_mat);
+  pose->SetRodriques(ext_rodriques_mat);
+  pose->SetTranslation(ext_translate_mat);
+}
+
+void Camera::ProjectPoints(vector<cv::Point3d>& pw, Pose* pose,
+                           vector<cv::Point2d>& pi) const
+{
+  cv::Mat ext_rodriques_mat = cv::Mat(3, 1, CV_64F);
+  cv::Mat ext_translate_mat = cv::Mat(3, 1, CV_64F);
+  pose->GetRodriques(ext_rodriques_mat);
+  pose->GetTranslation(ext_translate_mat);
+  cv::Mat object_points = cv::Mat((int)pw.size(), 1, CV_32FC3);
+  cv::Mat image_points = cv::Mat((int)pi.size(), 1, CV_32FC2);
+  for (size_t i = 0; i < pw.size(); i++)
+  {
+    object_points.at<float>(i * 3 + 0) = (float)pw[i].x;
+    object_points.at<float>(i * 3 + 1) = (float)pw[i].y;
+    object_points.at<float>(i * 3 + 2) = (float)pw[i].z;
+  }
+  // Rodrigues rotation vector first, then the translation vector.
+  cv::projectPoints(object_points, ext_rodriques_mat, ext_translate_mat,
+                    calib_K, calib_D, image_points);
+  for (size_t i = 0; i < pw.size(); i++)
+  {
+    pi[i].x = image_points.at<float>(i * 2 + 0);
+    pi[i].y = image_points.at<float>(i * 2 + 1);
+  }
+  object_points.release();
+  image_points.release();
+}
+
+void Camera::ProjectPoints(const cv::Mat& object_points,
+                           const cv::Mat& rotation_vector,
+                           const cv::Mat& translation_vector,
+                           cv::Mat& image_points) const
+{
+  // Project points
+  cv::projectPoints(object_points, rotation_vector, translation_vector,
+                    calib_K, calib_D, image_points);
+}
+
+void Camera::ProjectPoints(const cv::Mat& object_points, const Pose* pose,
+                           cv::Mat& image_points) const
+{
+  cv::Mat ext_rodriques_mat = cv::Mat(3, 1, CV_64F);
+  cv::Mat ext_translate_mat = cv::Mat(3, 1, CV_64F);
+  pose->GetRodriques(ext_rodriques_mat);
+  pose->GetTranslation(ext_translate_mat);
+  cv::projectPoints(object_points, ext_rodriques_mat, ext_translate_mat,
+                    calib_K, calib_D, image_points);
+}
+
+void Camera::ProjectPoints(const cv::Mat& object_points, double gl[16],
+                           cv::Mat& image_points) const
+{
+  double glm[4][4] = {
+    { gl[0], gl[4], gl[8], gl[12] },
+    { gl[1], gl[5], gl[9], gl[13] },
+    { gl[2], gl[6], gl[10], gl[14] },
+    { gl[3], gl[7], gl[11], gl[15] },
+  };
+  cv::Mat glm_mat = cv::Mat(4, 4, CV_64F, glm);
+
+  // For some reason we need to mirror both y and z ???
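+  // Likely the usual OpenCV/OpenGL convention mismatch: OpenCV's camera
+  // frame is x-right/y-down/z-forward, OpenGL's is x-right/y-up/z-backward,
+  // so y and z change sign when converting between the two.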
+  double cv_mul_data[4][4];
+  cv::Mat cv_mul = cv::Mat(4, 4, CV_64F, cv_mul_data);
+  cv::setIdentity(cv_mul);
+  cv_mul.at<double>(1, 1) = -1;
+  cv_mul.at<double>(2, 2) = -1;
+  glm_mat = cv_mul * glm_mat;
+
+  // Rotation
+  Rotation r;
+  r.SetMatrix(glm_mat);
+  double rod[3];
+  cv::Mat rod_mat = cv::Mat(3, 1, CV_64F, rod);
+  r.GetRodriques(rod_mat);
+  // Translation
+  double tra[3] = { glm[0][3], glm[1][3], glm[2][3] };
+  cv::Mat tra_mat = cv::Mat(3, 1, CV_64F, tra);
+  // Project points
+  ProjectPoints(object_points, rod_mat, tra_mat, image_points);
+}
+
+void Camera::ProjectPoint(const cv::Point3d& pw, const Pose* pose,
+                          cv::Point2d& pi) const
+{
+  float object_points_data[3] = { (float)pw.x, (float)pw.y, (float)pw.z };
+  float image_points_data[2] = { 0 };
+  cv::Mat object_points = cv::Mat(1, 1, CV_32FC3, object_points_data);
+  cv::Mat image_points = cv::Mat(1, 1, CV_32FC2, image_points_data);
+  ProjectPoints(object_points, pose, image_points);
+  pi.x = image_points.at<float>(0);
+  pi.y = image_points.at<float>(1);
+}
+
+void Camera::ProjectPoint(const cv::Point3f& pw, const Pose* pose,
+                          cv::Point2f& pi) const
+{
+  float object_points_data[3] = { (float)pw.x, (float)pw.y, (float)pw.z };
+  float image_points_data[2] = { 0 };
+  cv::Mat object_points = cv::Mat(1, 1, CV_32FC3, object_points_data);
+  cv::Mat image_points = cv::Mat(1, 1, CV_32FC2, image_points_data);
+  ProjectPoints(object_points, pose, image_points);
+  pi.x = image_points.at<float>(0);
+  pi.y = image_points.at<float>(1);
+}
+
+Homography::Homography()
+{
+  H = cv::Mat(3, 3, CV_64F);
+}
+
+void Homography::Find(const vector<PointDouble>& pw,
+                      const vector<PointDouble>& pi)
+{
+  assert(pw.size() == pi.size());
+  int size = (int)pi.size();
+
+  cv::Point2d srcp[size];
+  cv::Point2d dstp[size];
+
+  for (int i = 0; i < size; ++i)
+  {
+    srcp[i].x = pw[i].x;
+    srcp[i].y = pw[i].y;
+
+    dstp[i].x = pi[i].x;
+    dstp[i].y = pi[i].y;
+  }
+
+  cv::Mat src_pts, dst_pts;
+  dst_pts = cv::Mat(1, size, CV_64FC2, dstp);
+  src_pts = cv::Mat(1, size, CV_64FC2, srcp);
+
+  cv::Mat tmp = cv::findHomography(src_pts, dst_pts);
+  if (tmp.elemSize() > 0)
+  {
+    H = tmp;
+  }
+}
+
+void Homography::ProjectPoints(const vector<PointDouble>& from,
+                               vector<PointDouble>& to) const
+{
+  int size = (int)from.size();
+
+  cv::Point3d srcp[size];
+
+  for (int i = 0; i < size; ++i)
+  {
+    srcp[i].x = from[i].x;
+    srcp[i].y = from[i].y;
+    srcp[i].z = 1;
+  }
+
+  cv::Point3d dstp[size];
+
+  cv::Mat src_pts, dst_pts;
+  src_pts = cv::Mat(1, size, CV_64FC3, srcp);
+  dst_pts = cv::Mat(1, size, CV_64FC3, dstp);
+
+  if (!H.empty())
+  {
+    cv::transform(src_pts, dst_pts, H);
+
+    to.clear();
+    for (int i = 0; i < size; ++i)
+    {
+      PointDouble pt;
+      pt.x = dstp[i].x / dstp[i].z;
+      pt.y = dstp[i].y / dstp[i].z;
+
+      to.push_back(pt);
+    }
+  }
 }
 
-void Camera::CalcExteriorOrientation(vector<CvPoint3D64f>& pw, vector<CvPoint2D64f>& pi,
-                                     Pose *pose)
-{
-  double ext_rodriques[3];
-  double ext_translate[3];
-  CvMat ext_rodriques_mat = cvMat(3, 1, CV_64F, ext_rodriques);
-  CvMat ext_translate_mat = cvMat(3, 1, CV_64F, ext_translate);
-  CvMat *object_points = cvCreateMat((int)pw.size(), 1, CV_32FC3);
-  CvMat *image_points = cvCreateMat((int)pi.size(), 1, CV_32FC2);
-  for (size_t i=0; i<pw.size(); i++) {
-    object_points->data.fl[i*3+0] = (float)pw[i].x;
-    object_points->data.fl[i*3+1] = (float)pw[i].y;
-    object_points->data.fl[i*3+2] = (float)pw[i].z;
-    image_points->data.fl[i*2+0] = (float)pi[i].x;
-    image_points->data.fl[i*2+1] = (float)pi[i].y;
-  }
-  //cvFindExtrinsicCameraParams2(object_points, image_points, &calib_K, &calib_D, &ext_rodriques_mat, &ext_translate_mat);
-  cvFindExtrinsicCameraParams2(object_points, image_points, &calib_K, NULL, &ext_rodriques_mat, &ext_translate_mat);
-  pose->SetRodriques(&ext_rodriques_mat);
-  pose->SetTranslation(&ext_translate_mat);
-  cvReleaseMat(&object_points);
-  cvReleaseMat(&image_points);
-}
-
-void Camera::CalcExteriorOrientation(vector<CvPoint3D64f>& pw, vector<CvPoint2D64f>& pi,
-                                     CvMat *rodriques, CvMat *tra)
-{
-  //assert(pw.size() == pi.size());
-
-  int size = (int)pi.size();
-
-  CvPoint3D64f *world_pts = new CvPoint3D64f[size];
-  CvPoint2D64f *image_pts = new CvPoint2D64f[size];
-
-  for(int i = 0; i < size; i++){
-    world_pts[i].x = pw[i].x;
-    world_pts[i].y = pw[i].y;
-    world_pts[i].z = pw[i].z;
-    // flip image points! Why???
-    //image_pts[i].x = x_res - pi[i].x;
-    //image_pts[i].y = y_res - pi[i].y;
-    image_pts[i].x = pi[i].x;
-    image_pts[i].y = pi[i].y;
-  }
-
-  double rot[3]; // rotation vector
-  CvMat world_mat, image_mat, rot_vec;
-  cvInitMatHeader(&world_mat, size, 1, CV_64FC3, world_pts);
-  cvInitMatHeader(&image_mat, size, 1, CV_64FC2, image_pts);
-  cvInitMatHeader(&rot_vec, 3, 1, CV_64FC1, rot);
-
-  cvZero(tra);
-  //cvmodFindExtrinsicCameraParams2(&world_mat, &image_mat, &calib_K, &calib_D, rodriques, tra, error);
-  cvFindExtrinsicCameraParams2(&world_mat, &image_mat, &calib_K, &calib_D, rodriques, tra);
-
-  delete[] world_pts;
-  delete[] image_pts;
-}
-
-void Camera::CalcExteriorOrientation(vector<PointDouble>& pw, vector<PointDouble>& pi,
-                                     CvMat *rodriques, CvMat *tra)
-{
-  //assert(pw.size() == pi.size());
-  int size = (int)pi.size();
-
-  vector<CvPoint3D64f> pw3;
-  pw3.resize(size);
-  for (int i=0; i<size; i++) {
-    pw3[i].x = pw[i].x;
-    pw3[i].y = pw[i].y;
-    pw3[i].z = 0;
-  }
-
-  CalcExteriorOrientation(pw3, pi, rodriques, tra);
-}
-
-void Camera::CalcExteriorOrientation(vector<PointDouble>& pw, vector<PointDouble>& pi, Pose *pose)
-{
-  double ext_rodriques[3];
-  double ext_translate[3];
-  CvMat ext_rodriques_mat = cvMat(3, 1, CV_64F, ext_rodriques);
-  CvMat ext_translate_mat = cvMat(3, 1, CV_64F, ext_translate);
-  CalcExteriorOrientation(pw, pi, &ext_rodriques_mat, &ext_translate_mat);
-  pose->SetRodriques(&ext_rodriques_mat);
-  pose->SetTranslation(&ext_translate_mat);
-}
-
-bool Camera::CalcExteriorOrientation(const CvMat* object_points, CvMat* image_points, CvMat *rodriques, CvMat *tra) {
-  cvFindExtrinsicCameraParams2(object_points, image_points, &calib_K, &calib_D, rodriques, tra);
-  return true;
-}
-
-bool Camera::CalcExteriorOrientation(const CvMat* object_points, CvMat* image_points, Pose *pose) {
-  double ext_rodriques[3];
-  double ext_translate[3];
-  CvMat ext_rodriques_mat = cvMat(3, 1, CV_64F, ext_rodriques);
-  CvMat ext_translate_mat = cvMat(3, 1, CV_64F, ext_translate);
-  bool ret = CalcExteriorOrientation(object_points, image_points, &ext_rodriques_mat, &ext_translate_mat);
-  pose->SetRodriques(&ext_rodriques_mat);
-  pose->SetTranslation(&ext_translate_mat);
-  return ret;
-}
-
-void Camera::ProjectPoints(vector<CvPoint3D64f>& pw, Pose *pose, vector<CvPoint2D64f>& pi) const {
-  double ext_rodriques[3];
-  double ext_translate[3];
-  CvMat ext_rodriques_mat = cvMat(3, 1, CV_64F, ext_rodriques);
-  CvMat ext_translate_mat = cvMat(3, 1, CV_64F, ext_translate);
-  pose->GetRodriques(&ext_rodriques_mat);
-  pose->GetTranslation(&ext_translate_mat);
-  CvMat *object_points = cvCreateMat((int)pw.size(), 1, CV_32FC3);
-  CvMat *image_points = cvCreateMat((int)pi.size(), 1, CV_32FC2);
-  for (size_t i=0; i<pw.size(); i++) {
-    object_points->data.fl[i*3+0] = (float)pw[i].x;
-    object_points->data.fl[i*3+1] = (float)pw[i].y;
-    object_points->data.fl[i*3+2] = (float)pw[i].z;
-  }
-  cvProjectPoints2(object_points, &ext_rodriques_mat, &ext_translate_mat, &calib_K, &calib_D, image_points);
-  for (size_t i=0; i<pw.size(); i++) {
-    pi[i].x = image_points->data.fl[i*2+0];
-    pi[i].y = image_points->data.fl[i*2+1];
-  }
-  cvReleaseMat(&object_points);
-  cvReleaseMat(&image_points);
-}
-
-void Camera::ProjectPoints(const CvMat* object_points, const CvMat* rotation_vector,
-                           const CvMat* translation_vector, CvMat* image_points) const
-{
-  // Project points
-  cvProjectPoints2(object_points, rotation_vector, translation_vector, &calib_K, &calib_D, image_points);
-}
-
-void Camera::ProjectPoints(const CvMat* object_points, const Pose* pose, CvMat* image_points) const
-{
-  double ext_rodriques[3];
-  double ext_translate[3];
-  CvMat ext_rodriques_mat = cvMat(3, 1, CV_64F, ext_rodriques);
-  CvMat ext_translate_mat = cvMat(3, 1, CV_64F, ext_translate);
-  pose->GetRodriques(&ext_rodriques_mat);
-  pose->GetTranslation(&ext_translate_mat);
-  cvProjectPoints2(object_points, &ext_rodriques_mat, &ext_translate_mat, &calib_K, &calib_D, image_points);
-}
-
-void Camera::ProjectPoints(const CvMat* object_points, double gl[16], CvMat* image_points) const
-{
-  double glm[4][4] = {
-    gl[0], gl[4], gl[8], gl[12],
-    gl[1], gl[5], gl[9], gl[13],
-    gl[2], gl[6], gl[10], gl[14],
-    gl[3], gl[7], gl[11], gl[15],
-  };
-  CvMat glm_mat = cvMat(4, 4, CV_64F, glm);
-
-  // For some reason we need to mirror both y and z ???
- double cv_mul_data[4][4]; - CvMat cv_mul = cvMat(4, 4, CV_64F, cv_mul_data); - cvSetIdentity(&cv_mul); - cvmSet(&cv_mul, 1, 1, -1); - cvmSet(&cv_mul, 2, 2, -1); - cvMatMul(&cv_mul, &glm_mat, &glm_mat); - - // Rotation - Rotation r; - r.SetMatrix(&glm_mat); - double rod[3]; - CvMat rod_mat=cvMat(3, 1, CV_64F, rod); - r.GetRodriques(&rod_mat); - // Translation - double tra[3] = { glm[0][3], glm[1][3], glm[2][3] }; - CvMat tra_mat = cvMat(3, 1, CV_64F, tra); - // Project points - ProjectPoints(object_points, &rod_mat, &tra_mat, image_points); -} - -void Camera::ProjectPoint(const CvPoint3D64f pw, const Pose *pose, CvPoint2D64f &pi) const { - float object_points_data[3] = {(float)pw.x, (float)pw.y, (float)pw.z}; - float image_points_data[2] = {0}; - CvMat object_points = cvMat(1, 1, CV_32FC3, object_points_data); - CvMat image_points = cvMat(1, 1, CV_32FC2, image_points_data); - ProjectPoints(&object_points, pose, &image_points); - pi.x = image_points.data.fl[0]; - pi.y = image_points.data.fl[1]; -} - -void Camera::ProjectPoint(const CvPoint3D32f pw, const Pose *pose, CvPoint2D32f &pi) const { - float object_points_data[3] = {(float)pw.x, (float)pw.y, (float)pw.z}; - float image_points_data[2] = {0}; - CvMat object_points = cvMat(1, 1, CV_32FC3, object_points_data); - CvMat image_points = cvMat(1, 1, CV_32FC2, image_points_data); - ProjectPoints(&object_points, pose, &image_points); - pi.x = image_points.data.fl[0]; - pi.y = image_points.data.fl[1]; -} - -Homography::Homography() { - cvInitMatHeader(&H, 3, 3, CV_64F, H_data); -} - -void Homography::Find(const vector& pw, const vector& pi) -{ - assert(pw.size() == pi.size()); - int size = (int)pi.size(); - - CvPoint2D64f *srcp = new CvPoint2D64f[size]; - CvPoint2D64f *dstp = new CvPoint2D64f[size]; - - for(int i = 0; i < size; ++i){ - srcp[i].x = pw[i].x; - srcp[i].y = pw[i].y; - - dstp[i].x = pi[i].x; - dstp[i].y = pi[i].y; - } - - CvMat src_pts, dst_pts; - cvInitMatHeader(&dst_pts, 1, size, CV_64FC2, dstp); - cvInitMatHeader(&src_pts, 1, size, CV_64FC2, srcp); - -#ifdef OPENCV11 - cvFindHomography(&src_pts, &dst_pts, &H, 0, 0, 0); -#else - cvFindHomography(&src_pts, &dst_pts, &H); -#endif - - delete[] srcp; - delete[] dstp; -} - -void Homography::ProjectPoints(const vector& from, vector& to) -{ - int size = (int)from.size(); - - CvPoint3D64f *srcp = new CvPoint3D64f[size]; - - for(int i = 0; i < size; ++i){ - srcp[i].x = from[i].x; - srcp[i].y = from[i].y; - srcp[i].z = 1; - } - - CvPoint3D64f *dstp = new CvPoint3D64f[size]; - - CvMat src_pts, dst_pts; - cvInitMatHeader(&src_pts, 1, size, CV_64FC3, srcp); - cvInitMatHeader(&dst_pts, 1, size, CV_64FC3, dstp); - - cvTransform(&src_pts, &dst_pts, &H); - - to.clear(); - for(int i = 0; i < size; ++i) - { - PointDouble pt; - pt.x = dstp[i].x / dstp[i].z; - pt.y = dstp[i].y / dstp[i].z; - - to.push_back(pt); - } - - delete[] srcp; - delete[] dstp; -} - -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/CaptureDevice.cpp b/ar_track_alvar/src/CaptureDevice.cpp index 4c006e1..ce5a0d6 100644 --- a/ar_track_alvar/src/CaptureDevice.cpp +++ b/ar_track_alvar/src/CaptureDevice.cpp @@ -25,12 +25,12 @@ #include -namespace alvar { - -CaptureDevice::CaptureDevice(const std::string captureType, const std::string id, const std::string description) - : mCaptureType(captureType) - , mId(id) - , mDescription(description) +namespace alvar +{ +CaptureDevice::CaptureDevice(const std::string captureType, + const std::string id, + const std::string description) + : mCaptureType(captureType), 
mId(id), mDescription(description) { } @@ -40,24 +40,24 @@ CaptureDevice::~CaptureDevice() std::string CaptureDevice::captureType() const { - return mCaptureType; + return mCaptureType; } std::string CaptureDevice::id() const { - return mId; + return mId; } std::string CaptureDevice::description() const { - return mDescription; + return mDescription; } std::string CaptureDevice::uniqueName() const { - std::stringstream name; - name << captureType() << "_" << id(); - return name.str(); + std::stringstream name; + name << captureType() << "_" << id(); + return name.str(); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/CaptureFactory.cpp b/ar_track_alvar/src/CaptureFactory.cpp index 87105c6..ff73447 100644 --- a/ar_track_alvar/src/CaptureFactory.cpp +++ b/ar_track_alvar/src/CaptureFactory.cpp @@ -27,244 +27,281 @@ #include "ar_track_alvar/CapturePlugin.h" #include "ar_track_alvar/DirectoryIterator.h" -namespace alvar { - +namespace alvar +{ CaptureFactoryPrivate::CaptureFactoryPrivate() - : mPluginPaths() - , mPluginPrefix() - , mPluginPostfix() - , mLoadedAllPlugins(false) - , mPluginMap() - , mCapturePluginMap() + : mPluginPaths() + , mPluginPrefix() + , mPluginPostfix() + , mLoadedAllPlugins(false) + , mPluginMap() + , mCapturePluginMap() { - setupPluginPaths(); + setupPluginPaths(); - mPluginPrefix = pluginPrefix(); - mPluginPrefix.append("alvarcaptureplugin"); + mPluginPrefix = pluginPrefix(); + mPluginPrefix.append("alvarcaptureplugin"); - mPluginPostfix.append(ALVAR_VERSION_NODOTS); - #if _DEBUG - mPluginPostfix.append("d"); - #endif - mPluginPostfix.append("."); - mPluginPostfix.append(pluginExtension()); + mPluginPostfix.append(ALVAR_VERSION_NODOTS); +#if _DEBUG + mPluginPostfix.append("d"); +#endif + mPluginPostfix.append("."); + mPluginPostfix.append(pluginExtension()); } CaptureFactoryPrivate::~CaptureFactoryPrivate() { - for (CapturePluginMap::iterator itr = mCapturePluginMap.begin(); itr != mCapturePluginMap.end(); itr++) { - delete itr->second; - } - mCapturePluginMap.clear(); - mPluginMap.clear(); + for (CapturePluginMap::iterator itr = mCapturePluginMap.begin(); + itr != mCapturePluginMap.end(); itr++) + { + delete itr->second; + } + mCapturePluginMap.clear(); + mPluginMap.clear(); } void CaptureFactoryPrivate::loadPlugins() { - // ensure that plugins have not already been loaded - if (mLoadedAllPlugins) { - return; + // ensure that plugins have not already been loaded + if (mLoadedAllPlugins) + { + return; + } + + // iterate over search paths + for (PluginPathsVector::iterator itr = mPluginPaths.begin(); + itr != mPluginPaths.end(); ++itr) + { + DirectoryIterator directory(*itr); + + // iterate over entries in current path + while (directory.hasNext()) + { + std::string entry = directory.next(); + + // verify that filename matches the plugin convention + int prefixIndex = entry.find(mPluginPrefix); + int postfixIndex = entry.rfind(mPluginPostfix); + if (prefixIndex == -1 || postfixIndex == -1) + { + continue; + } + + // load the actual plugin + entry = entry.substr(mPluginPrefix.size(), + postfixIndex - mPluginPrefix.size()); + loadPlugin(entry, directory.currentPath()); } + } - // iterate over search paths - for (PluginPathsVector::iterator itr = mPluginPaths.begin(); itr != mPluginPaths.end(); ++itr) { - DirectoryIterator directory(*itr); - - // iterate over entries in current path - while (directory.hasNext()) { - std::string entry = directory.next(); - - // verify that filename matches the plugin convention - int prefixIndex = 
entry.find(mPluginPrefix); - int postfixIndex = entry.rfind(mPluginPostfix); - if (prefixIndex == -1 || postfixIndex == -1) { - continue; - } - - // load the actual plugin - entry = entry.substr(mPluginPrefix.size(), postfixIndex - mPluginPrefix.size()); - loadPlugin(entry, directory.currentPath()); - } - } - - // this should only be done once - mLoadedAllPlugins = true; + // this should only be done once + mLoadedAllPlugins = true; } -void CaptureFactoryPrivate::loadPlugin(const std::string &captureType) +void CaptureFactoryPrivate::loadPlugin(const std::string& captureType) { - // ensure plugin is not alredy loaded - if (mPluginMap.find(captureType) != mPluginMap.end()) { - return; - } - - // iterate over search paths - for (PluginPathsVector::iterator itr = mPluginPaths.begin(); itr != mPluginPaths.end(); ++itr) { - DirectoryIterator directory(*itr); - - // iterate over entries in current path - while (directory.hasNext()) { - std::string entry = directory.next(); - - // verify that filename matches the plugin convention - int prefixIndex = entry.find(mPluginPrefix); - int postfixIndex = entry.rfind(mPluginPostfix); - if (prefixIndex == -1 || postfixIndex == -1) { - continue; - } - - // verify that filename matches capture type - entry = entry.substr(mPluginPrefix.size(), postfixIndex - mPluginPrefix.size()); - if (entry != captureType) { - continue; - } - - // load the actual plugin - loadPlugin(entry, directory.currentPath()); - - // stop searching - break; - } + // ensure plugin is not alredy loaded + if (mPluginMap.find(captureType) != mPluginMap.end()) + { + return; + } + + // iterate over search paths + for (PluginPathsVector::iterator itr = mPluginPaths.begin(); + itr != mPluginPaths.end(); ++itr) + { + DirectoryIterator directory(*itr); + + // iterate over entries in current path + while (directory.hasNext()) + { + std::string entry = directory.next(); + + // verify that filename matches the plugin convention + int prefixIndex = entry.find(mPluginPrefix); + int postfixIndex = entry.rfind(mPluginPostfix); + if (prefixIndex == -1 || postfixIndex == -1) + { + continue; + } + + // verify that filename matches capture type + entry = entry.substr(mPluginPrefix.size(), + postfixIndex - mPluginPrefix.size()); + if (entry != captureType) + { + continue; + } + + // load the actual plugin + loadPlugin(entry, directory.currentPath()); + + // stop searching + break; } + } } -void CaptureFactoryPrivate::loadPlugin(const std::string &captureType, const std::string &filename) +void CaptureFactoryPrivate::loadPlugin(const std::string& captureType, + const std::string& filename) { - // ensure plugin is not alredy loaded - if (mPluginMap.find(captureType) != mPluginMap.end()) { - return; + // ensure plugin is not alredy loaded + if (mPluginMap.find(captureType) != mPluginMap.end()) + { + return; + } + + try + { + // create and load the plugin + Plugin plugin(filename); + + // register the plugin + // for this to work, each plugin must export the following method + // extern "C" __declspec(dllexport) void registerPlugin(const std::string + // &captureType, alvar::CapturePlugin *capturePlugin); + // which creates a new CapturePlugin object: capturePlugin = new + // MyCapturePlugin(captureType); + typedef void (*RegisterPlugin)(const std::string& captureType, + CapturePlugin*& capturePlugin); + RegisterPlugin registerPlugin = (RegisterPlugin)plugin.resolve("registerPlu" + "gin"); + CapturePlugin* capturePlugin = NULL; + if (registerPlugin) + { + registerPlugin(captureType, capturePlugin); } - try { - 
// create and load the plugin - Plugin plugin(filename); - - // register the plugin - // for this to work, each plugin must export the following method - // extern "C" __declspec(dllexport) void registerPlugin(const std::string &captureType, alvar::CapturePlugin *capturePlugin); - // which creates a new CapturePlugin object: capturePlugin = new MyCapturePlugin(captureType); - typedef void (*RegisterPlugin)(const std::string &captureType, CapturePlugin *&capturePlugin); - RegisterPlugin registerPlugin = (RegisterPlugin)plugin.resolve("registerPlugin"); - CapturePlugin *capturePlugin = NULL; - if (registerPlugin) { - registerPlugin(captureType, capturePlugin); - } - - // return if plugin did not create it's capture plugin - if (capturePlugin == NULL) { - return; - } - - // insert the plugin and capture plugin into maps - mPluginMap.insert(PluginMap::value_type(captureType, plugin)); - mCapturePluginMap.insert(CapturePluginMap::value_type(captureType, capturePlugin)); - } - catch (AlvarException e) { - // if anything fails, simply ignore it... - #if defined(_DEBUG) || !defined(NDEBUG) - std::cout << e.what() << std::endl; - #endif + // return if plugin did not create it's capture plugin + if (capturePlugin == NULL) + { + return; } + + // insert the plugin and capture plugin into maps + mPluginMap.insert(PluginMap::value_type(captureType, plugin)); + mCapturePluginMap.insert( + CapturePluginMap::value_type(captureType, capturePlugin)); + } + catch (AlvarException e) + { +// if anything fails, simply ignore it... +#if defined(_DEBUG) || !defined(NDEBUG) + std::cout << e.what() << std::endl; +#endif + } } -CapturePlugin *CaptureFactoryPrivate::getPlugin(const std::string &captureType) +CapturePlugin* CaptureFactoryPrivate::getPlugin(const std::string& captureType) { - // find CapturePlugin implementation according to capture type - CapturePluginMap::iterator itr; + // find CapturePlugin implementation according to capture type + CapturePluginMap::iterator itr; + itr = mCapturePluginMap.find(captureType); + + // if not found, attempt to load plugin + if (itr == mCapturePluginMap.end()) + { + loadPlugin(captureType); itr = mCapturePluginMap.find(captureType); - - // if not found, attempt to load plugin - if (itr == mCapturePluginMap.end()) { - loadPlugin(captureType); - itr = mCapturePluginMap.find(captureType); - } - - // return CapturePlugin implementation if found - CapturePlugin *capturePlugin = NULL; - if (itr != mCapturePluginMap.end()) { - capturePlugin = itr->second; - } - return capturePlugin; + } + + // return CapturePlugin implementation if found + CapturePlugin* capturePlugin = NULL; + if (itr != mCapturePluginMap.end()) + { + capturePlugin = itr->second; + } + return capturePlugin; } - // static class variables instantiation for singleton implementation -CaptureFactory *CaptureFactory::mInstance = NULL; +CaptureFactory* CaptureFactory::mInstance = NULL; Mutex CaptureFactory::mMutex; CaptureFactory::CaptureFactoryDestroyer CaptureFactory::mDestroyer; -CaptureFactory *CaptureFactory::instance() +CaptureFactory* CaptureFactory::instance() { - // do not use double-checked locking - // http://www.aristeia.com/Papers/DDJ_Jul_Aug_2004_revised.pdf - // use a destroyer class to properly clean up resources - // http://www.research.ibm.com/designpatterns/pubs/ph-jun96.txt - Lock lock(&mMutex); - if (mInstance == NULL) { - mInstance = new CaptureFactory(); - mDestroyer.set(mInstance); - } - return mInstance; + // do not use double-checked locking + // 
http://www.aristeia.com/Papers/DDJ_Jul_Aug_2004_revised.pdf + // use a destroyer class to properly clean up resources + // http://www.research.ibm.com/designpatterns/pubs/ph-jun96.txt + Lock lock(&mMutex); + if (mInstance == NULL) + { + mInstance = new CaptureFactory(); + mDestroyer.set(mInstance); + } + return mInstance; } -CaptureFactory::CaptureFactory() - : d(new CaptureFactoryPrivate()) +CaptureFactory::CaptureFactory() : d(new CaptureFactoryPrivate()) { } CaptureFactory::~CaptureFactory() { - delete d; + delete d; } CaptureFactory::CapturePluginVector CaptureFactory::enumeratePlugins() { - // load all plugins - d->loadPlugins(); - - // return the available plugins as a vector of plugin names - CaptureFactory::CapturePluginVector keys; - for (CaptureFactoryPrivate::PluginMap::iterator itr = d->mPluginMap.begin(); itr != d->mPluginMap.end(); ++itr) { - keys.push_back(itr->first); - } - - return keys; + // load all plugins + d->loadPlugins(); + + // return the available plugins as a vector of plugin names + CaptureFactory::CapturePluginVector keys; + for (CaptureFactoryPrivate::PluginMap::iterator itr = d->mPluginMap.begin(); + itr != d->mPluginMap.end(); ++itr) + { + keys.push_back(itr->first); + } + + return keys; } -CaptureFactory::CaptureDeviceVector CaptureFactory::enumerateDevices(const std::string &captureType) +CaptureFactory::CaptureDeviceVector +CaptureFactory::enumerateDevices(const std::string& captureType) { - CaptureDeviceVector devices; - - // load all plugins and enumerate their devices - if (captureType.empty()) { - d->loadPlugins(); - for (CaptureFactoryPrivate::CapturePluginMap::iterator itr = d->mCapturePluginMap.begin(); itr != d->mCapturePluginMap.end(); ++itr) { - CaptureDeviceVector pluginDevices = itr->second->enumerateDevices(); - devices.insert(devices.end(), pluginDevices.begin(), pluginDevices.end()); - } + CaptureDeviceVector devices; + + // load all plugins and enumerate their devices + if (captureType.empty()) + { + d->loadPlugins(); + for (CaptureFactoryPrivate::CapturePluginMap::iterator itr = + d->mCapturePluginMap.begin(); + itr != d->mCapturePluginMap.end(); ++itr) + { + CaptureDeviceVector pluginDevices = itr->second->enumerateDevices(); + devices.insert(devices.end(), pluginDevices.begin(), pluginDevices.end()); } - // only enumerate the devices of one plugin - else { - CapturePlugin *capturePlugin = d->getPlugin(captureType); - if (capturePlugin) { - devices = capturePlugin->enumerateDevices(); - } + } + // only enumerate the devices of one plugin + else + { + CapturePlugin* capturePlugin = d->getPlugin(captureType); + if (capturePlugin) + { + devices = capturePlugin->enumerateDevices(); } + } - return devices; + return devices; } -Capture *CaptureFactory::createCapture(const CaptureDevice captureDevice) +Capture* CaptureFactory::createCapture(const CaptureDevice captureDevice) { - // get CapturePlugin implementation - CapturePlugin *capturePlugin = d->getPlugin(captureDevice.captureType()); - - // create Capture implementation and return - Capture *capture = NULL; - if (capturePlugin) { - capture = capturePlugin->createCapture(captureDevice); - } - return capture; + // get CapturePlugin implementation + CapturePlugin* capturePlugin = d->getPlugin(captureDevice.captureType()); + + // create Capture implementation and return + Capture* capture = NULL; + if (capturePlugin) + { + capture = capturePlugin->createCapture(captureDevice); + } + return capture; } -} // namespace alvar +} // namespace alvar diff --git 
a/ar_track_alvar/src/CaptureFactory_unix.cpp b/ar_track_alvar/src/CaptureFactory_unix.cpp index 4b0b1ee..e376d82 100644 --- a/ar_track_alvar/src/CaptureFactory_unix.cpp +++ b/ar_track_alvar/src/CaptureFactory_unix.cpp @@ -24,69 +24,78 @@ #include "ar_track_alvar/CaptureFactory_private.h" #include -#include // for readlink() - -namespace alvar { +#include // for readlink() +namespace alvar +{ void CaptureFactoryPrivate::setupPluginPaths() { - // application path and default plugin path - const int bufferSize = 4096; - char applicationBuffer[bufferSize]; - int count = readlink("/proc/self/exe", applicationBuffer, bufferSize); - if (count != 0 && count < bufferSize) { - std::string applicationPath(applicationBuffer, count); - applicationPath = std::string(applicationPath, 0, applicationPath.find_last_of("/")); - mPluginPaths.push_back(applicationPath); - mPluginPaths.push_back(applicationPath + "/alvarplugins"); - } - - // ALVAR library path - parseEnvironmentVariable(std::string("ALVAR_LIBRARY_PATH")); + // application path and default plugin path + const int bufferSize = 4096; + char applicationBuffer[bufferSize]; + int count = readlink("/proc/self/exe", applicationBuffer, bufferSize); + if (count != 0 && count < bufferSize) + { + std::string applicationPath(applicationBuffer, count); + applicationPath = + std::string(applicationPath, 0, applicationPath.find_last_of("/")); + mPluginPaths.push_back(applicationPath); + mPluginPaths.push_back(applicationPath + "/alvarplugins"); + } - // ALVAR plugin path - parseEnvironmentVariable(std::string("ALVAR_PLUGIN_PATH")); + // ALVAR library path + parseEnvironmentVariable(std::string("ALVAR_LIBRARY_PATH")); + + // ALVAR plugin path + parseEnvironmentVariable(std::string("ALVAR_PLUGIN_PATH")); } -void CaptureFactoryPrivate::parseEnvironmentVariable(const std::string &variable) +void CaptureFactoryPrivate::parseEnvironmentVariable( + const std::string& variable) { - // acquire environment variable - char *buffer; - std::string path(""); - buffer = getenv(variable.data()); - if (buffer) { - path = std::string(buffer); + // acquire environment variable + char* buffer; + std::string path(""); + buffer = getenv(variable.data()); + if (buffer) + { + path = std::string(buffer); + } + + // tokenize paths + char delimitor = ':'; + if (!path.empty()) + { + std::string::size_type start = 0; + std::string::size_type end = 0; + while ((end = path.find_first_of(delimitor, start)) != std::string::npos) + { + std::string tmp(path, start, end - start); + if (!tmp.empty()) + { + mPluginPaths.push_back(tmp); + } + start = end + 1; } - - // tokenize paths - char delimitor = ':'; - if (!path.empty()) { - std::string::size_type start = 0; - std::string::size_type end = 0; - while ((end = path.find_first_of(delimitor, start)) != std::string::npos) { - std::string tmp(path, start, end - start); - if (!tmp.empty()) { - mPluginPaths.push_back(tmp); - } - start = end + 1; - } - if (start != path.size()) { - std::string tmp(path, start, std::string::npos); - if (!tmp.empty()) { - mPluginPaths.push_back(tmp); - } - } + if (start != path.size()) + { + std::string tmp(path, start, std::string::npos); + if (!tmp.empty()) + { + mPluginPaths.push_back(tmp); + } } + } } std::string CaptureFactoryPrivate::pluginPrefix() { - return std::string("lib"); + return std::string("lib"); } std::string CaptureFactoryPrivate::pluginExtension() { - return std::string("so"); + return std::string("so"); } -} // namespace alvar +} // namespace alvar diff --git 
a/ar_track_alvar/src/CaptureFactory_win.cpp b/ar_track_alvar/src/CaptureFactory_win.cpp index dddce22..fd99436 100644 --- a/ar_track_alvar/src/CaptureFactory_win.cpp +++ b/ar_track_alvar/src/CaptureFactory_win.cpp @@ -25,78 +25,88 @@ #include -namespace alvar { - +namespace alvar +{ void CaptureFactoryPrivate::setupPluginPaths() { - // application path and default plugin path - const DWORD bufferSize = 4096; - char applicationBuffer[bufferSize]; - DWORD count = GetModuleFileName(NULL, applicationBuffer, bufferSize); - if (count != 0 && count < bufferSize) { - std::string applicationPath(applicationBuffer, count); - applicationPath = std::string(applicationPath, 0, applicationPath.find_last_of("\\")); - mPluginPaths.push_back(applicationPath); - mPluginPaths.push_back(applicationPath + "\\alvarplugins"); - } + // application path and default plugin path + const DWORD bufferSize = 4096; + char applicationBuffer[bufferSize]; + DWORD count = GetModuleFileName(NULL, applicationBuffer, bufferSize); + if (count != 0 && count < bufferSize) + { + std::string applicationPath(applicationBuffer, count); + applicationPath = + std::string(applicationPath, 0, applicationPath.find_last_of("\\")); + mPluginPaths.push_back(applicationPath); + mPluginPaths.push_back(applicationPath + "\\alvarplugins"); + } - // ALVAR library path - parseEnvironmentVariable(std::string("ALVAR_LIBRARY_PATH")); + // ALVAR library path + parseEnvironmentVariable(std::string("ALVAR_LIBRARY_PATH")); - // ALVAR plugin path - parseEnvironmentVariable(std::string("ALVAR_PLUGIN_PATH")); + // ALVAR plugin path + parseEnvironmentVariable(std::string("ALVAR_PLUGIN_PATH")); } -void CaptureFactoryPrivate::parseEnvironmentVariable(const std::string &variable) +void CaptureFactoryPrivate::parseEnvironmentVariable( + const std::string& variable) { - // acquire environment variable - char *buffer; - std::string path(""); - #if defined(_MSC_VER) && (_MSC_VER < 1400) - buffer = getenv(variable.data()); - if (buffer) { - path = std::string(buffer); - } - #else - size_t requiredSize; - getenv_s(&requiredSize, NULL, 0, variable.data()); - if (requiredSize > 0) { - buffer = (char *)malloc(requiredSize * sizeof(char)); - getenv_s(&requiredSize, buffer, requiredSize, variable.data()); - path = std::string(buffer, requiredSize - 1); - free(buffer); - } - #endif + // acquire environment variable + char* buffer; + std::string path(""); +#if defined(_MSC_VER) && (_MSC_VER < 1400) + buffer = getenv(variable.data()); + if (buffer) + { + path = std::string(buffer); + } +#else + size_t requiredSize; + getenv_s(&requiredSize, NULL, 0, variable.data()); + if (requiredSize > 0) + { + buffer = (char*)malloc(requiredSize * sizeof(char)); + getenv_s(&requiredSize, buffer, requiredSize, variable.data()); + path = std::string(buffer, requiredSize - 1); + free(buffer); + } +#endif - // tokenize paths - char delimitor = ';'; - if (!path.empty()) { - std::string::size_type start = 0; - std::string::size_type end = 0; - while ((end = path.find_first_of(delimitor, start)) != std::string::npos) { - std::string tmp(path, start, end - start); - if (!tmp.empty()) { - mPluginPaths.push_back(tmp); - } - start = end + 1; - } - if (start != path.size()) { - std::string tmp(path, start, std::string::npos); - if (!tmp.empty()) { - mPluginPaths.push_back(tmp); - } - } + // tokenize paths + char delimitor = ';'; + if (!path.empty()) + { + std::string::size_type start = 0; + std::string::size_type end = 0; + while ((end = path.find_first_of(delimitor, start)) != std::string::npos) + { 
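+      // each iteration consumes one ';'-separated path component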
+ std::string tmp(path, start, end - start); + if (!tmp.empty()) + { + mPluginPaths.push_back(tmp); + } + start = end + 1; + } + if (start != path.size()) + { + std::string tmp(path, start, std::string::npos); + if (!tmp.empty()) + { + mPluginPaths.push_back(tmp); + } } + } } std::string CaptureFactoryPrivate::pluginPrefix() { - return std::string(""); + return std::string(""); } std::string CaptureFactoryPrivate::pluginExtension() { - return std::string("dll"); + return std::string("dll"); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/ConnectedComponents.cpp b/ar_track_alvar/src/ConnectedComponents.cpp index 1ceaeac..fd68e55 100644 --- a/ar_track_alvar/src/ConnectedComponents.cpp +++ b/ar_track_alvar/src/ConnectedComponents.cpp @@ -27,288 +27,293 @@ using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; Labeling::Labeling() { - gray = 0; - bw = 0; - cam = 0; - thresh_param1 = 31; - thresh_param2 = 5; + cam = 0; + thresh_param1 = 31; + thresh_param2 = 5; } Labeling::~Labeling() { - if(gray) - cvReleaseImage(&gray); - if(bw) - cvReleaseImage(&bw); + gray.release(); + bw.release(); } -bool Labeling::CheckBorder(CvSeq* contour, int width, int height) +bool Labeling::CheckBorder(const std::vector& contour, int width, + int height) { - bool ret = true; - for(int i = 0; i < contour->total; ++i) - { - CvPoint* pt = (CvPoint*)cvGetSeqElem(contour, i); - if((pt->x <= 1) || (pt->x >= width-2) || (pt->y <= 1) || (pt->y >= height-2)) ret = false; - } - return ret; + bool ret = true; + for (const auto& pt : contour) + { + if ((pt.x <= 1) || (pt.x >= width - 2) || (pt.y <= 1) || + (pt.y >= height - 2)) + ret = false; + } + return ret; } LabelingCvSeq::LabelingCvSeq() : _n_blobs(0), _min_edge(20), _min_area(25) { - SetOptions(); - storage = cvCreateMemStorage(0); + SetOptions(); } LabelingCvSeq::~LabelingCvSeq() { - if(storage) - cvReleaseMemStorage(&storage); } -void LabelingCvSeq::SetOptions(bool _detect_pose_grayscale) { - detect_pose_grayscale = _detect_pose_grayscale; +void LabelingCvSeq::SetOptions(bool _detect_pose_grayscale) +{ + detect_pose_grayscale = _detect_pose_grayscale; } -void LabelingCvSeq::LabelSquares(IplImage* image, bool visualize) +void LabelingCvSeq::LabelSquares(cv::Mat& image, bool visualize) { - - if (gray && ((gray->width != image->width) || (gray->height != image->height))) { - cvReleaseImage(&gray); gray=NULL; - if (bw) cvReleaseImage(&bw); bw=NULL; - } - if (gray == NULL) { - gray = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1); - gray->origin = image->origin; - bw = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1); - bw->origin = image->origin; + if (!gray.empty() && ((gray.cols != image.cols) || (gray.rows != image.rows))) + { + gray.release(); + bw.release(); + } + if (gray.empty()) + { + gray = cv::Mat(image.rows, image.cols, CV_8UC1); + bw = cv::Mat(image.rows, image.cols, CV_8UC1); + } + + // Convert grayscale and threshold + if (image.channels() == 4) + cv::cvtColor(image, gray, cv::COLOR_BGRA2GRAY); + else if (image.channels() == 3) + cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY); + else if (image.channels() == 1) + image.copyTo(gray); + else + { + cerr << "Unsupported image format" << endl; + } + + cv::adaptiveThreshold(gray, bw, 255, cv::ADAPTIVE_THRESH_MEAN_C, + cv::THRESH_BINARY_INV, thresh_param1, thresh_param2); + + std::vector> contours; + std::vector> squares; + std::vector> square_contours; + + cv::findContours(bw, contours, cv::RETR_LIST, 
cv::CHAIN_APPROX_NONE, + cv::Point(0, 0)); + + for (const auto& contour : contours) + { + if (contour.size() < _min_edge) + { + continue; } - - // Convert grayscale and threshold - if(image->nChannels == 4) - cvCvtColor(image, gray, CV_RGBA2GRAY); - else if(image->nChannels == 3) - cvCvtColor(image, gray, CV_RGB2GRAY); - else if(image->nChannels == 1) - cvCopy(image, gray); - else { - cerr<<"Unsupported image format"< result; + cv::approxPolyDP(contour, result, cv::arcLength(contour, false) * 0.035, + true); // TODO: Parameters? + if (result.size() == 4 && CheckBorder(result, image.cols, image.rows) && + cv::contourArea(result) > _min_area && // TODO check limits + cv::isContourConvex(result)) // ttehop: Changed to 'contours' instead + // of 'result' + { + squares.push_back(result); + square_contours.push_back(contour); } + } - cvAdaptiveThreshold(gray, bw, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, thresh_param1, thresh_param2); - //cvThreshold(gray, bw, 127, 255, CV_THRESH_BINARY_INV); - - CvSeq* contours; - CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSeq), storage); - CvSeq* square_contours = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSeq), storage); + _n_blobs = squares.size(); + blob_corners.resize(_n_blobs); - cvFindContours(bw, storage, &contours, sizeof(CvContour), - CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0)); + // For every detected 4-corner blob + for (int i = 0; i < _n_blobs; ++i) + { + vector fitted_lines(4); + blob_corners[i].resize(4); + const auto& square = squares.at(i); + const auto& square_contour = square_contours.at(i); - while(contours) + for (int j = 0; j < 4; ++j) { - if(contours->total < _min_edge) - { - contours = contours->h_next; - continue; - } - - CvSeq* result = cvApproxPoly(contours, sizeof(CvContour), storage, - CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.035, 0 ); // TODO: Parameters? - - if( result->total == 4 && CheckBorder(result, image->width, image->height) && - fabs(cvContourArea(result,CV_WHOLE_SEQ)) > _min_area && // TODO check limits - cvCheckContourConvexity(result) ) // ttehop: Changed to 'contours' instead of 'result' - { - cvSeqPush(squares, result); - cvSeqPush(square_contours, contours); - } - contours = contours->h_next; + const auto& pt0 = square.at(j); + const auto& pt1 = square.at((j + 1) % 4); + int k0 = -1, k1 = -1; + for (int k = 0; k < square_contour.size(); k++) + { + const auto& pt2 = square_contour.at(k); + if ((pt0.x == pt2.x) && (pt0.y == pt2.y)) + k0 = k; + if ((pt1.x == pt2.x) && (pt1.y == pt2.y)) + k1 = k; + } + int len; + if (k1 >= k0) + len = k1 - k0 - 1; // neither k0 nor k1 are included + else + len = square_contour.size() - k0 + k1 - 1; + if (len == 0) + len = 1; + + cv::Mat line_data = cv::Mat(1, len, CV_32FC2); + for (int l = 0; l < len; l++) + { + int ll = (k0 + l + 1) % square_contour.size(); + const auto& p = square_contour.at(ll); + cv::Point2f pp; + pp.x = float(p.x); + pp.y = float(p.y); + + // Undistort + if (cam) + cam->Undistort(pp); + + line_data.at(0, l) = pp; + } + + // Fit edge and put to vector of edges + cv::Vec4f params; + + // TODO: The detect_pose_grayscale is still under work... 
+ /* + if (detect_pose_grayscale && + (pt0->x > 3) && (pt0->y > 3) && + (pt0->x < (gray->width-4)) && + (pt0->y < (gray->height-4))) + { + // ttehop: Grayscale experiment + FitLineGray(line_data, params, gray); + } + */ + cv::fitLine(line_data, params, cv::DIST_L2, 0, 0.01, 0.01); + + // cvFitLine(line_data, CV_DIST_L2, 0, 0.01, 0.01, params); + ////cvFitLine(line_data, CV_DIST_HUBER, 0, 0.01, 0.01, params); + Line line = Line(params); + if (visualize) + DrawLine(image, line); + fitted_lines[j] = line; + + line_data.release(); } - _n_blobs = squares->total; - blob_corners.resize(_n_blobs); - - // For every detected 4-corner blob - for(int i = 0; i < _n_blobs; ++i) + // Calculated four intersection points + for (size_t j = 0; j < 4; ++j) { - vector fitted_lines(4); - blob_corners[i].resize(4); - CvSeq* sq = (CvSeq*)cvGetSeqElem(squares, i); - CvSeq* square_contour = (CvSeq*)cvGetSeqElem(square_contours, i); - - for(int j = 0; j < 4; ++j) - { - CvPoint* pt0 = (CvPoint*)cvGetSeqElem(sq, j); - CvPoint* pt1 = (CvPoint*)cvGetSeqElem(sq, (j+1)%4); - int k0=-1, k1=-1; - for (int k = 0; ktotal; k++) { - CvPoint* pt2 = (CvPoint*)cvGetSeqElem(square_contour, k); - if ((pt0->x == pt2->x) && (pt0->y == pt2->y)) k0=k; - if ((pt1->x == pt2->x) && (pt1->y == pt2->y)) k1=k; - } - int len; - if (k1 >= k0) len = k1-k0-1; // neither k0 nor k1 are included - else len = square_contour->total-k0+k1-1; - if (len == 0) len = 1; - - CvMat* line_data = cvCreateMat(1, len, CV_32FC2); - for (int l=0; ltotal; - CvPoint* p = (CvPoint*)cvGetSeqElem(square_contour, ll); - CvPoint2D32f pp; - pp.x = float(p->x); - pp.y = float(p->y); - - // Undistort - if(cam) - cam->Undistort(pp); - - CV_MAT_ELEM(*line_data, CvPoint2D32f, 0, l) = pp; - } - - // Fit edge and put to vector of edges - float params[4] = {0}; - - // TODO: The detect_pose_grayscale is still under work... - /* - if (detect_pose_grayscale && - (pt0->x > 3) && (pt0->y > 3) && - (pt0->x < (gray->width-4)) && - (pt0->y < (gray->height-4))) - { - // ttehop: Grayscale experiment - FitLineGray(line_data, params, gray); - } - */ - cvFitLine(line_data, CV_DIST_L2, 0, 0.01, 0.01, params); - - //cvFitLine(line_data, CV_DIST_L2, 0, 0.01, 0.01, params); - ////cvFitLine(line_data, CV_DIST_HUBER, 0, 0.01, 0.01, params); - Line line = Line(params); - if(visualize) DrawLine(image, line); - fitted_lines[j] = line; - - cvReleaseMat(&line_data); - } - - // Calculated four intersection points - for(size_t j = 0; j < 4; ++j) - { - PointDouble intc = Intersection(fitted_lines[j],fitted_lines[(j+1)%4]); - - // TODO: Instead, test OpenCV find corner in sub-pix... - //CvPoint2D32f pt = cvPoint2D32f(intc.x, intc.y); - //cvFindCornerSubPix(gray, &pt, - // 1, cvSize(3,3), cvSize(-1,-1), - // cvTermCriteria( - // CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,10,1e-4)); - - // TODO: Now there is a wierd systematic 0.5 pixel error that is fixed here... - //intc.x += 0.5; - //intc.y += 0.5; - - if(cam) cam->Distort(intc); - - // TODO: Should we make this always counter-clockwise or clockwise? 
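Both the removed and the rewritten loop above recover each marker corner the same way: fit one line per quadrilateral edge, then intersect neighbouring lines. A standalone sketch of that step; only the `cv::fitLine` call and the `(vx, vy, x0, y0)` layout of its result are taken from the code above, the points are synthetic:

```
#include <opencv2/imgproc.hpp>
#include <cstdio>
#include <vector>

int main()
{
  // synthetic samples along two edges that meet at the corner (1, 1)
  std::vector<cv::Point2f> edge_a = { { 0.f, 1.f }, { 0.5f, 1.f }, { 1.f, 1.f } };
  std::vector<cv::Point2f> edge_b = { { 1.f, 0.f }, { 1.f, 0.5f }, { 1.f, 1.f } };

  cv::Vec4f a, b;  // (vx, vy, x0, y0): unit direction plus a point on the line
  cv::fitLine(edge_a, a, cv::DIST_L2, 0, 0.01, 0.01);
  cv::fitLine(edge_b, b, cv::DIST_L2, 0, 0.01, 0.01);

  // intersect p = (x0, y0) + t * (vx, vy) of line a with line b (2x2 solve);
  // ALVAR's Intersection() does the equivalent for the four fitted edges
  float det = a[0] * b[1] - a[1] * b[0];
  float t = ((b[2] - a[2]) * b[1] - (b[3] - a[3]) * b[0]) / det;
  std::printf("corner: (%f, %f)\n", a[2] + t * a[0], a[3] + t * a[1]);
  return 0;
}
```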
- /* - if (image->origin && j == 1) blob_corners[i][3] = intc; - else if (image->origin && j == 3) blob_corners[i][1] = intc; - else blob_corners[i][j] = intc; - */ - blob_corners[i][j] = intc; - } - if (visualize) { - for(size_t j = 0; j < 4; ++j) { - PointDouble &intc = blob_corners[i][j]; - if (j == 0) cvCircle(image, cvPoint(int(intc.x), int(intc.y)), 5, CV_RGB(255, 255, 255)); - if (j == 1) cvCircle(image, cvPoint(int(intc.x), int(intc.y)), 5, CV_RGB(255, 0, 0)); - if (j == 2) cvCircle(image, cvPoint(int(intc.x), int(intc.y)), 5, CV_RGB(0, 255, 0)); - if (j == 3) cvCircle(image, cvPoint(int(intc.x), int(intc.y)), 5, CV_RGB(0, 0, 255)); - } - } + PointDouble intc = + Intersection(fitted_lines[j], fitted_lines[(j + 1) % 4]); + + // TODO: Instead, test OpenCV find corner in sub-pix... + // CvPoint2D32f pt = cvPoint2D32f(intc.x, intc.y); + // cvFindCornerSubPix(gray, &pt, + // 1, cvSize(3,3), cvSize(-1,-1), + // cvTermCriteria( + // CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,10,1e-4)); + + // TODO: Now there is a wierd systematic 0.5 pixel error that is fixed + // here... + // intc.x += 0.5; + // intc.y += 0.5; + + if (cam) + cam->Distort(intc); + + // TODO: Should we make this always counter-clockwise or clockwise? + /* + if (image->origin && j == 1) blob_corners[i][3] = intc; + else if (image->origin && j == 3) blob_corners[i][1] = intc; + else blob_corners[i][j] = intc; + */ + blob_corners[i][j] = intc; } - - cvClearMemStorage(storage); + if (visualize) + { + for (size_t j = 0; j < 4; ++j) + { + PointDouble& intc = blob_corners[i][j]; + if (j == 0) + cv::circle(image, cv::Point(int(intc.x), int(intc.y)), 5, + CV_RGB(255, 255, 255)); + if (j == 1) + cv::circle(image, cv::Point(int(intc.x), int(intc.y)), 5, + CV_RGB(255, 0, 0)); + if (j == 2) + cv::circle(image, cv::Point(int(intc.x), int(intc.y)), 5, + CV_RGB(0, 255, 0)); + if (j == 3) + cv::circle(image, cv::Point(int(intc.x), int(intc.y)), 5, + CV_RGB(0, 0, 255)); + } + } + } } -CvSeq* LabelingCvSeq::LabelImage(IplImage* image, int min_size, bool approx) +std::vector> LabelingCvSeq::LabelImage(cv::Mat& image, + int min_size, + bool approx) { - assert(image->origin == 0); // Currently only top-left origin supported - if (gray && ((gray->width != image->width) || (gray->height != image->height))) { - cvReleaseImage(&gray); gray=NULL; - if (bw) cvReleaseImage(&bw); bw=NULL; - } - if (gray == NULL) { - gray = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1); - gray->origin = image->origin; - bw = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1); - bw->origin = image->origin; - } - - // Convert grayscale and threshold - if(image->nChannels == 4) - cvCvtColor(image, gray, CV_RGBA2GRAY); - else if(image->nChannels == 3) - cvCvtColor(image, gray, CV_RGB2GRAY); - else if(image->nChannels == 1) - cvCopy(image, gray); - else { - cerr<<"Unsupported image format"<total < min_size) - { - contours = contours->h_next; - continue; - } - - if(approx) - { - CvSeq* result = cvApproxPoly(contours, sizeof(CvContour), storage, - CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 ); // TODO: Parameters? 
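For orientation when comparing the two versions of `LabelSquares` and `LabelImage`: both implement the same recipe, adaptive threshold, contour extraction, polygon approximation, then keep convex quadrilaterals above a minimum area. A condensed standalone sketch under those assumptions (the file name is a placeholder; 31, 5 and 25 mirror `thresh_param1`, `thresh_param2` and `_min_area` above):

```
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

int main()
{
  cv::Mat gray = cv::imread("markers.png", cv::IMREAD_GRAYSCALE);
  if (gray.empty())
    return 1;

  // binarize with a local mean threshold so lighting gradients do not matter
  cv::Mat bw;
  cv::adaptiveThreshold(gray, bw, 255, cv::ADAPTIVE_THRESH_MEAN_C,
                        cv::THRESH_BINARY_INV, 31, 5);

  std::vector<std::vector<cv::Point>> contours;
  cv::findContours(bw, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);

  std::vector<std::vector<cv::Point>> quads;
  for (const auto& c : contours)
  {
    std::vector<cv::Point> poly;
    cv::approxPolyDP(c, poly, cv::arcLength(c, true) * 0.035, true);
    if (poly.size() == 4 && cv::contourArea(poly) > 25 &&
        cv::isContourConvex(poly))
      quads.push_back(poly);  // candidate marker blob, corners still coarse
  }
  return 0;
}
```

The edge-fitting code above then replaces these coarse `approxPolyDP` corners with sub-pixel line intersections.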
- - if(cvCheckContourConvexity(result)) - { - cvSeqPush(squares, result); - } - } - else - cvSeqPush(squares, contours); - - contours = contours->h_next; - } - - cvClearMemStorage(storage); - - return squares; + if (!gray.empty() && ((gray.cols != image.cols) || (gray.rows != image.rows))) + { + gray.release(); + bw.release(); + } + if (gray.empty()) + { + gray = cv::Mat(image.rows, image.cols, CV_8UC1); + bw = cv::Mat(image.rows, image.cols, CV_8UC1); + } + + // Convert grayscale and threshold + if (image.channels() == 4) + cv::cvtColor(image, gray, cv::COLOR_RGBA2GRAY); + else if (image.channels() == 3) + cv::cvtColor(image, gray, cv::COLOR_RGB2GRAY); + else if (image.channels() == 1) + image.copyTo(gray); + else + { + cerr << "Unsupported image format" << endl; + } + + cv::adaptiveThreshold(gray, bw, 255, cv::ADAPTIVE_THRESH_MEAN_C, + cv::THRESH_BINARY_INV, thresh_param1, thresh_param2); + + std::vector> contours; + std::vector> squares; + + cv::findContours(bw, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE, + cv::Point(0, 0)); + + for (const auto& contour : contours) + { + if (approx) + { + std::vector result; + cv::approxPolyDP(contour, result, cv::arcLength(contour, false) * 0.02, + false); // TODO: Parameters? + if (cv::isContourConvex(result)) + { + squares.push_back(result); + } + } + else + squares.push_back(contour); + } + + return squares; } -inline int round(double x) { - return (x)>=0?(int)((x)+0.5):(int)((x)-0.5); +inline int round(double x) +{ + return (x) >= 0 ? (int)((x) + 0.5) : (int)((x)-0.5); } -template -inline T absdiff(T c1, T c2) { - return (c2>c1?c2-c1:c1-c2); +template +inline T absdiff(T c1, T c2) +{ + return (c2 > c1 ? c2 - c1 : c1 - c2); } //#define SHOW_DEBUG @@ -317,92 +322,127 @@ inline T absdiff(T c1, T c2) { #endif // TODO: This should be in LabelingCvSeq ??? -void FitLineGray(CvMat *line_data, float params[4], IplImage *gray) { - // this very simple approach works... - /* - float *cx = &(params[2]); - float *cy = &(params[3]); - float *sx = &(params[0]); - float *sy = &(params[1]); - CvPoint2D32f *p1 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, 0, sizeof(CvPoint2D32f)); - CvPoint2D32f *p2 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, line_data->cols-1, sizeof(CvPoint2D32f)); - *cx = p1->x; *cy = p1->y; - *sx = p2->x - p1->x; *sy = p2->y - p1->y; - return; - */ +void FitLineGray(cv::Mat& line_data, float params[4], cv::Mat& gray) +{ + // this very simple approach works... 
+ /* + float *cx = &(params[2]); + float *cy = &(params[3]); + float *sx = &(params[0]); + float *sy = &(params[1]); + CvPoint2D32f *p1 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, 0, + sizeof(CvPoint2D32f)); CvPoint2D32f *p2 = + (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, line_data->cols-1, + sizeof(CvPoint2D32f)); *cx = p1->x; *cy = p1->y; *sx = p2->x - p1->x; *sy = + p2->y - p1->y; return; + */ #ifdef SHOW_DEBUG - IplImage *tmp = cvCreateImage(cvSize(gray->width, gray->height), IPL_DEPTH_8U, 3); - IplImage *tmp2 = cvCreateImage(cvSize(gray->width*5, gray->height*5), IPL_DEPTH_8U, 3); - cvCvtColor(gray, tmp, CV_GRAY2RGB); - cvResize(tmp, tmp2, CV_INTER_NN); + IplImage* tmp = + cvCreateImage(cvSize(gray->width, gray->height), IPL_DEPTH_8U, 3); + IplImage* tmp2 = + cvCreateImage(cvSize(gray->width * 5, gray->height * 5), IPL_DEPTH_8U, 3); + cvCvtColor(gray, tmp, CV_GRAY2RGB); + cvResize(tmp, tmp2, CV_INTER_NN); #endif - // Discover 1st the line normal direction - CvPoint2D32f *p1 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, 0, sizeof(CvPoint2D32f)); - CvPoint2D32f *p2 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, line_data->cols-1, sizeof(CvPoint2D32f)); - double dx = +(p2->y - p1->y); - double dy = -(p2->x - p1->x); - if ((dx == 0) && (dy == 0)) return; - else if (dx == 0) { dy /= dy; } - else if (dy == 0) { dx /= dx; } - else if (abs(dx) > abs(dy)) { dy /= dx; dx /= dx; } - else { dx /= dy; dy /= dy; } - - // Build normal search table - const int win_size=5; - const int win_mid=win_size/2; - const int diff_win_size=win_size-1; - double xx[win_size], yy[win_size]; - double dxx[diff_win_size], dyy[diff_win_size]; - xx[win_mid] = 0; yy[win_mid] = 0; - for (int i=1; i<=win_size/2; i++) { - xx[win_mid + i] = round(i*dx); - xx[win_mid - i] = -xx[win_mid + i]; - yy[win_mid + i] = round(i*dy); - yy[win_mid - i] = -yy[win_mid + i]; - } - for (int i=0; icols; l++) { - CvPoint2D32f *p = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, l, sizeof(CvPoint2D32f)); - - double dx=0, dy=0, ww=0; - for (int i=0; iimageData[int((p->y+yy[i])*gray->widthStep+(p->x+xx[i]))]; - unsigned char c2 = (unsigned char)gray->imageData[int((p->y+yy[i+1])*gray->widthStep+(p->x+xx[i+1]))]; + // Discover 1st the line normal direction + auto p1 = line_data.ptr(0, 0); + auto p2 = line_data.ptr(0, line_data.cols - 1); + double dx = +(p2->y - p1->y); + double dy = -(p2->x - p1->x); + if ((dx == 0) && (dy == 0)) + return; + else if (dx == 0) + { + dy /= dy; + } + else if (dy == 0) + { + dx /= dx; + } + else if (abs(dx) > abs(dy)) + { + dy /= dx; + dx /= dx; + } + else + { + dx /= dy; + dy /= dy; + } + + // Build normal search table + const int win_size = 5; + const int win_mid = win_size / 2; + const int diff_win_size = win_size - 1; + double xx[win_size], yy[win_size]; + double dxx[diff_win_size], dyy[diff_win_size]; + xx[win_mid] = 0; + yy[win_mid] = 0; + for (int i = 1; i <= win_size / 2; i++) + { + xx[win_mid + i] = round(i * dx); + xx[win_mid - i] = -xx[win_mid + i]; + yy[win_mid + i] = round(i * dy); + yy[win_mid - i] = -yy[win_mid + i]; + } + for (int i = 0; i < diff_win_size; i++) + { + dxx[i] = (xx[i] + xx[i + 1]) / 2; + dyy[i] = (yy[i] + yy[i + 1]) / 2; + } + + // Adjust the points + for (int l = 0; l < line_data.cols; l++) + { + auto p = line_data.ptr(0, l); + + double dx = 0, dy = 0, ww = 0; + for (int i = 0; i < diff_win_size; i++) + { + unsigned char c1 = (unsigned char)gray.at( + int((p->y + yy[i]) * gray.cols + (p->x + xx[i]))); + unsigned char c2 = (unsigned char)gray.at( + 
int((p->y + yy[i + 1]) * gray.cols + (p->x + xx[i + 1]))); #ifdef SHOW_DEBUG - cvCircle(tmp2, cvPoint((p->x+xx[i])*5+2,(p->y+yy[i])*5+2), 0, CV_RGB(0,0,255)); - cvCircle(tmp2, cvPoint((p->x+xx[i+1])*5+2,(p->y+yy[i+1])*5+2), 0, CV_RGB(0,0,255)); + cvCircle(tmp2, cv::Point((p->x + xx[i]) * 5 + 2, (p->y + yy[i]) * 5 + 2), + 0, CV_RGB(0, 0, 255)); + cvCircle( + tmp2, + cv::Point((p->x + xx[i + 1]) * 5 + 2, (p->y + yy[i + 1]) * 5 + 2), 0, + CV_RGB(0, 0, 255)); #endif - double w = absdiff(c1, c2); - dx += dxx[i]*w; - dy += dyy[i]*w; - ww += w; - } - if (ww > 0) { - dx /= ww; dy /= ww; - } + double w = absdiff(c1, c2); + dx += dxx[i] * w; + dy += dyy[i] * w; + ww += w; + } + if (ww > 0) + { + dx /= ww; + dy /= ww; + } #ifdef SHOW_DEBUG - cvLine(tmp2, cvPoint(p->x*5+2,p->y*5+2), cvPoint((p->x+dx)*5+2, (p->y+dy)*5+2), CV_RGB(0,255,0)); - p->x += float(dx); p->y += float(dy); - cvCircle(tmp2, cvPoint(p->x*5+2,p->y*5+2), 0, CV_RGB(255,0,0)); + cvLine(tmp2, cv::Point(p->x * 5 + 2, p->y * 5 + 2), + cv::Point((p->x + dx) * 5 + 2, (p->y + dy) * 5 + 2), + CV_RGB(0, 255, 0)); + p->x += float(dx); + p->y += float(dy); + cvCircle(tmp2, cv::Point(p->x * 5 + 2, p->y * 5 + 2), 0, CV_RGB(255, 0, 0)); #else - p->x += float(dx); p->y += float(dy); + p->x += float(dx); + p->y += float(dy); #endif - } + } #ifdef SHOW_DEBUG - cvNamedWindow("tmp"); - cvShowImage("tmp",tmp2); - cvWaitKey(0); - cvReleaseImage(&tmp); - cvReleaseImage(&tmp2); + cvNamedWindow("tmp"); + cvShowImage("tmp", tmp2); + cvWaitKey(0); + cvReleaseImage(&tmp); + cvReleaseImage(&tmp2); #endif } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/CvTestbed.cpp b/ar_track_alvar/src/CvTestbed.cpp index 0a91986..165f63c 100644 --- a/ar_track_alvar/src/CvTestbed.cpp +++ b/ar_track_alvar/src/CvTestbed.cpp @@ -1,192 +1,244 @@ #include "ar_track_alvar/CvTestbed.h" -CvTestbed::CvTestbed() { - cap=NULL; - running=false; - videocallback=NULL; - keycallback=NULL; - images.clear(); -} - -CvTestbed::~CvTestbed() { - for (size_t i=0; icaptureImage(); - if (frame) { - default_videocallback(frame); - if (wintitle.size() > 0) { - cvShowImage(wintitle.c_str(), frame); - } - } - } - int key; +CvTestbed::CvTestbed() +{ + cap = NULL; + running = false; + videocallback = NULL; + keycallback = NULL; + images.clear(); +} + +CvTestbed::~CvTestbed() +{ + for (size_t i = 0; i < images.size(); i++) + { + if (images[i].release_at_exit) + { + images[i].ipl.release(); + } + } + images.clear(); +} + +void CvTestbed::default_videocallback(cv::Mat& image) +{ + // TODO: Skip frames if we are too slow? Impossible using OpenCV? 
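This callback is the hook through which user code sees every captured frame. Wiring it up from an application might look like the following hypothetical harness; only `Instance`, `SetVideoCallback` and `StartVideo` are taken from this file, the drawing call is illustrative:

```
#include <opencv2/imgproc.hpp>
#include "ar_track_alvar/CvTestbed.h"

static void my_frame_hook(cv::Mat& image)
{
  // runs once per captured frame, before the testbed shows the window
  cv::putText(image, "hello", cv::Point(10, 30), cv::FONT_HERSHEY_SIMPLEX, 1.0,
              CV_RGB(0, 255, 0));
}

int main()
{
  CvTestbed::Instance().SetVideoCallback(my_frame_hook);
  // NULL = let the testbed pick the first enumerated capture device; the
  // second argument becomes the window title
  return CvTestbed::Instance().StartVideo(NULL, "CvTestbed") ? 0 : 1;
}
```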
+ /* + static bool semaphore=false; + if (semaphore) return; + semaphore = true; + */ + + if (CvTestbed::Instance().videocallback) + { + CvTestbed::Instance().videocallback(image); + } + CvTestbed::Instance().ShowVisibleImages(); + + // semaphore = false; +} + +void CvTestbed::WaitKeys() +{ + running = true; + static bool pause = false; + while (running) + { + if (cap) + { + cv::Mat& frame = cap->captureImage(); + if (!frame.empty()) + { + default_videocallback(frame); + if (wintitle.size() > 0) + { + cv::imshow(wintitle, frame); + } + } + } + int key; #ifdef WIN32 - if( (key = cvWaitKey(1)) >= 0 ) { + if ((key = cvWaitKey(1)) >= 0) + { #else - if( (key = cvWaitKey(20)) >= 0 ) { + if ((key = cv::waitKey(20)) >= 0) + { #endif - if (keycallback) { - key = keycallback(key); - } - // If 'keycallback' didn't handle key - Use default handling - // By default 'C' for calibration - if (key == 'C') { - if (cap) { - cap->showSettingsDialog(); - } - } - // By default '0'-'9' toggles visible images - else if ((key >= '0') && (key <= '9')) { - int index=key-'0'; - ToggleImageVisible(index); - } - else if(key == 'p') { - pause = !pause; - } - else if (key > 0) { - running=false; - } - } - } -} - -void CvTestbed::ShowVisibleImages() { - for (size_t i=0; ienumerateDevices(); - if (vec.size() < 1) return false; - cap = CaptureFactory::instance()->createCapture(vec[0]); - if (!cap->start()) { - delete cap; - return false; - } - clean=true; - } - if (_wintitle) { - wintitle = _wintitle; - cvNamedWindow(_wintitle, 1); + if (keycallback) + { + key = keycallback(key); + } + // If 'keycallback' didn't handle key - Use default handling + // By default 'C' for calibration + if (key == 'C') + { + if (cap) + { + cap->showSettingsDialog(); + } + } + // By default '0'-'9' toggles visible images + else if ((key >= '0') && (key <= '9')) + { + int index = key - '0'; + ToggleImageVisible(index); + } + else if (key == 'p') + { + pause = !pause; + } + else if (key > 0) + { + running = false; + } + } + } +} + +void CvTestbed::ShowVisibleImages() +{ + for (size_t i = 0; i < images.size(); i++) + { + if (images[i].visible) + { + cv::imshow(images[i].title, images[i].ipl); + } + } +} + +CvTestbed& CvTestbed::Instance() +{ + static CvTestbed obj; + return obj; +} + +void CvTestbed::SetVideoCallback(void (*_videocallback)(cv::Mat& image)) +{ + videocallback = _videocallback; +} + +void CvTestbed::SetKeyCallback(int (*_keycallback)(int key)) +{ + keycallback = _keycallback; +} + +bool CvTestbed::StartVideo(Capture* _cap, const char* _wintitle) +{ + bool clean = false; + cap = _cap; + if (cap == NULL) + { + CaptureFactory::CaptureDeviceVector vec = + CaptureFactory::instance()->enumerateDevices(); + if (vec.size() < 1) + return false; + cap = CaptureFactory::instance()->createCapture(vec[0]); + if (!cap->start()) + { + delete cap; + return false; + } + clean = true; + } + if (_wintitle) + { + wintitle = _wintitle; + cv::namedWindow(_wintitle, 1); + } + WaitKeys(); // Call the main loop + if (clean) + { + cap->stop(); + delete cap; + } + return true; +} + +size_t CvTestbed::SetImage(const char* title, const cv::Mat& ipl, + bool release_at_exit /* =false */) +{ + size_t index = GetImageIndex(title); + if (index == -1) + { + // If the title wasn't found create new + Image i(ipl, title, false, release_at_exit); + images.push_back(i); + return (images.size() - 1); + } + // If the title was found replace the image + if (images[index].release_at_exit) + { + images[index].ipl.release(); + } + images[index].ipl = ipl; + 
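// remember whether the destructor should release this replacement image +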
images[index].release_at_exit = release_at_exit; + return index; +} + +cv::Mat CvTestbed::CreateImage(const char* title, cv::Size size, int depth, + int channels) +{ + cv::Mat ipl = cv::Mat(size, CV_MAKETYPE(depth, channels)); + SetImage(title, ipl, true); + return ipl; +} + +cv::Mat CvTestbed::CreateImageWithProto(const char* title, cv::Mat& proto, + int depth /* =0 */, + int channels /* =0 */) +{ + if (depth == 0) + depth = proto.depth(); + if (channels == 0) + channels = proto.channels(); + cv::Mat ipl = + cv::Mat(cv::Size(proto.cols, proto.rows), CV_MAKETYPE(depth, channels)); + SetImage(title, ipl, true); + return ipl; +} + +cv::Mat CvTestbed::GetImage(size_t index) +{ + if (index < 0) + return cv::Mat(); + if (index >= images.size()) + return cv::Mat(); + return images[index].ipl; +} + +size_t CvTestbed::GetImageIndex(const char* title) +{ + std::string s(title); + for (size_t i = 0; i < images.size(); i++) + { + if (s.compare(images[i].title) == 0) + { + return i; } - WaitKeys(); // Call the main loop - if (clean) { - cap->stop(); - delete cap; - } - return true; -} - -size_t CvTestbed::SetImage(const char *title, IplImage *ipl, bool release_at_exit /* =false */) { - size_t index = GetImageIndex(title); - if (index == -1) { - // If the title wasn't found create new - Image i(ipl, title, false, release_at_exit); - images.push_back(i); - return (images.size()-1); - } - // If the title was found replace the image - if (images[index].release_at_exit) { - cvReleaseImage(&(images[index].ipl)); - } - images[index].ipl = ipl; - images[index].release_at_exit = release_at_exit; - return index; -} - -IplImage *CvTestbed::CreateImage(const char *title, CvSize size, int depth, int channels ) { - IplImage *ipl=cvCreateImage(size, depth, channels); - if (!ipl) return NULL; - SetImage(title, ipl, true); - return ipl; -} - -IplImage *CvTestbed::CreateImageWithProto(const char *title, IplImage *proto, int depth /* =0 */, int channels /* =0 */) { - if (depth == 0) depth = proto->depth; - if (channels == 0) channels = proto->nChannels; - IplImage *ipl= cvCreateImage(cvSize(proto->width, proto->height), depth, channels); - if (!ipl) return NULL; - ipl->origin = proto->origin; - SetImage(title, ipl, true); - return ipl; -} - -IplImage *CvTestbed::GetImage(size_t index) { - if (index < 0) return NULL; - if (index >= images.size()) return NULL; - return images[index].ipl; -} - -size_t CvTestbed::GetImageIndex(const char *title) { - std::string s(title); - for (size_t i=0; i= images.size()) return false; - if (images[index].visible == false) { - images[index].visible=true; - cvNamedWindow(images[index].title.c_str(), flags); - return true; - } - else { - images[index].visible=false; - cvDestroyWindow(images[index].title.c_str()); - return false; - } + } + return (size_t)-1; +} + +cv::Mat CvTestbed::GetImage(const char* title) +{ + return GetImage(GetImageIndex(title)); +} + +bool CvTestbed::ToggleImageVisible(size_t index, int flags) +{ + if (index >= images.size()) + return false; + if (images[index].visible == false) + { + images[index].visible = true; + cv::namedWindow(images[index].title, flags); + return true; + } + else + { + images[index].visible = false; + cv::destroyWindow(images[index].title); + return false; + } } diff --git a/ar_track_alvar/src/DirectoryIterator.cpp b/ar_track_alvar/src/DirectoryIterator.cpp old mode 100755 new mode 100644 index d425537..766891b --- a/ar_track_alvar/src/DirectoryIterator.cpp +++ b/ar_track_alvar/src/DirectoryIterator.cpp @@ -25,36 +25,36 @@ #include 
"ar_track_alvar/DirectoryIterator_private.h" -namespace alvar { - -DirectoryIterator::DirectoryIterator(const std::string &path) - : d(new DirectoryIteratorPrivate(path)) +namespace alvar +{ +DirectoryIterator::DirectoryIterator(const std::string& path) + : d(new DirectoryIteratorPrivate(path)) { } DirectoryIterator::~DirectoryIterator() { - delete d; + delete d; } bool DirectoryIterator::hasNext() { - return d->hasNext(); + return d->hasNext(); } std::string DirectoryIterator::next() { - return d->next(); + return d->next(); } std::string DirectoryIterator::currentEntry() { - return d->mEntry; + return d->mEntry; } std::string DirectoryIterator::currentPath() { - return d->mDirectory + d->mEntry; + return d->mDirectory + d->mEntry; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/DirectoryIterator_unix.cpp b/ar_track_alvar/src/DirectoryIterator_unix.cpp old mode 100755 new mode 100644 index 68bfa20..bf6534f --- a/ar_track_alvar/src/DirectoryIterator_unix.cpp +++ b/ar_track_alvar/src/DirectoryIterator_unix.cpp @@ -25,88 +25,97 @@ #include -namespace alvar { - +namespace alvar +{ class DirectoryIteratorPrivateData { public: - DirectoryIteratorPrivateData() - : mHandle(NULL) - , mData(NULL) - { - } - - DIR *mHandle; - dirent *mData; + DirectoryIteratorPrivateData() : mHandle(NULL), mData(NULL) + { + } + + DIR* mHandle; + dirent* mData; }; -DirectoryIteratorPrivate::DirectoryIteratorPrivate(const std::string &path) - : d(new DirectoryIteratorPrivateData()) - , mDirectory(path) - , mEntry() - , mValid(false) +DirectoryIteratorPrivate::DirectoryIteratorPrivate(const std::string& path) + : d(new DirectoryIteratorPrivateData()) + , mDirectory(path) + , mEntry() + , mValid(false) { - if (mDirectory.at(mDirectory.length() - 1) != '/') { - mDirectory.append("/"); - } + if (mDirectory.at(mDirectory.length() - 1) != '/') + { + mDirectory.append("/"); + } } DirectoryIteratorPrivate::~DirectoryIteratorPrivate() { - closedir(d->mHandle); - delete d; + closedir(d->mHandle); + delete d; } bool DirectoryIteratorPrivate::hasNext() { - if (d->mHandle == NULL) { - d->mHandle = opendir(mDirectory.data()); - - if (d->mHandle != NULL) { - d->mData = readdir(d->mHandle); - - if (d->mData != NULL) { - mValid = true; - skip(); - } - } + if (d->mHandle == NULL) + { + d->mHandle = opendir(mDirectory.data()); + + if (d->mHandle != NULL) + { + d->mData = readdir(d->mHandle); + + if (d->mData != NULL) + { + mValid = true; + skip(); + } } + } - return mValid; + return mValid; } std::string DirectoryIteratorPrivate::next() { - if (!hasNext()) { - return ""; - } - - mEntry = std::string(d->mData->d_name); - - d->mData = readdir(d->mHandle); - if (d->mData == NULL) { - mValid = false; - } - else { - skip(); - } - - return mEntry; + if (!hasNext()) + { + return ""; + } + + mEntry = std::string(d->mData->d_name); + + d->mData = readdir(d->mHandle); + if (d->mData == NULL) + { + mValid = false; + } + else + { + skip(); + } + + return mEntry; } void DirectoryIteratorPrivate::skip() { - while (true) { - if (std::string(d->mData->d_name) != "." && std::string(d->mData->d_name) != "..") { - return; - } - - d->mData = readdir(d->mHandle); - if (d->mData == NULL) { - mValid = false; - return; - } + while (true) + { + if (std::string(d->mData->d_name) != "." 
&& + std::string(d->mData->d_name) != "..") + { + return; + } + + d->mData = readdir(d->mHandle); + if (d->mData == NULL) + { + mValid = false; + return; } + } } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/DirectoryIterator_win.cpp b/ar_track_alvar/src/DirectoryIterator_win.cpp old mode 100755 new mode 100644 index 09d7a45..74accb7 --- a/ar_track_alvar/src/DirectoryIterator_win.cpp +++ b/ar_track_alvar/src/DirectoryIterator_win.cpp @@ -25,83 +25,91 @@ #include -namespace alvar { - +namespace alvar +{ class DirectoryIteratorPrivateData { public: - DirectoryIteratorPrivateData() - : mHandle(INVALID_HANDLE_VALUE) - , mData() - { - } - - HANDLE mHandle; - WIN32_FIND_DATA mData; + DirectoryIteratorPrivateData() : mHandle(INVALID_HANDLE_VALUE), mData() + { + } + + HANDLE mHandle; + WIN32_FIND_DATA mData; }; -DirectoryIteratorPrivate::DirectoryIteratorPrivate(const std::string &path) - : d(new DirectoryIteratorPrivateData()) - , mDirectory(path) - , mEntry() - , mValid(false) +DirectoryIteratorPrivate::DirectoryIteratorPrivate(const std::string& path) + : d(new DirectoryIteratorPrivateData()) + , mDirectory(path) + , mEntry() + , mValid(false) { - if (mDirectory.at(mDirectory.length() - 1) != '\\') { - mDirectory.append("\\"); - } + if (mDirectory.at(mDirectory.length() - 1) != '\\') + { + mDirectory.append("\\"); + } } DirectoryIteratorPrivate::~DirectoryIteratorPrivate() { - FindClose(d->mHandle); - delete d; + FindClose(d->mHandle); + delete d; } bool DirectoryIteratorPrivate::hasNext() { - if (d->mHandle == INVALID_HANDLE_VALUE) { - std::string search = mDirectory + "*"; - d->mHandle = FindFirstFile(search.data(), &d->mData); - - if (d->mHandle != INVALID_HANDLE_VALUE) { - mValid = true; - skip(); - } + if (d->mHandle == INVALID_HANDLE_VALUE) + { + std::string search = mDirectory + "*"; + d->mHandle = FindFirstFile(search.data(), &d->mData); + + if (d->mHandle != INVALID_HANDLE_VALUE) + { + mValid = true; + skip(); } + } - return mValid; + return mValid; } std::string DirectoryIteratorPrivate::next() { - if (!hasNext()) { - return ""; - } - - mEntry = std::string(d->mData.cFileName); - - if (!FindNextFile(d->mHandle, &d->mData)) { - mValid = false; - } - else { - skip(); - } - - return mEntry; + if (!hasNext()) + { + return ""; + } + + mEntry = std::string(d->mData.cFileName); + + if (!FindNextFile(d->mHandle, &d->mData)) + { + mValid = false; + } + else + { + skip(); + } + + return mEntry; } void DirectoryIteratorPrivate::skip() { - while (true) { - if (std::string(d->mData.cFileName) != "." && std::string(d->mData.cFileName) != "..") { - return; - } - - if (!FindNextFile(d->mHandle, &d->mData)) { - mValid = false; - return; - } + while (true) + { + if (std::string(d->mData.cFileName) != "." 
&& + std::string(d->mData.cFileName) != "..") + { + return; + } + + if (!FindNextFile(d->mHandle, &d->mData)) + { + mValid = false; + return; } + } } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Draw.cpp b/ar_track_alvar/src/Draw.cpp index 4a3e891..5186756 100644 --- a/ar_track_alvar/src/Draw.cpp +++ b/ar_track_alvar/src/Draw.cpp @@ -22,258 +22,270 @@ */ #include "ar_track_alvar/Draw.h" -#include using namespace std; -namespace alvar { -using namespace std; - -void DrawPoints(IplImage *image, const vector& points, CvScalar color) +namespace alvar { - for(unsigned i = 0; i < points.size(); ++i) - cvLine(image, cvPoint(points[i].x,points[i].y), cvPoint(points[i].x,points[i].y), color); -} +using namespace std; -void DrawLine(IplImage* image, const Line line, CvScalar color) +void DrawLine(cv::Mat& image, const Line line, const cv::Scalar& color) { - double len = 100; - CvPoint p1, p2; - p1.x = int(line.c.x); p1.y = int(line.c.y); - p2.x = int(line.c.x+line.s.x*len); p2.y = int(line.c.y+line.s.y*len); - cvLine(image, p1, p2, color); + double len = 100; + cv::Point p1, p2; + p1.x = int(line.c.x); + p1.y = int(line.c.y); + p2.x = int(line.c.x + line.s.x * len); + p2.y = int(line.c.y + line.s.y * len); + cv::line(image, p1, p2, color); - p1.x = int(line.c.x); p1.y = int(line.c.y); - p2.x = int(line.c.x-line.s.x*len); p2.y = int(line.c.y-line.s.y*len); - cvLine(image, p1, p2, color); + p1.x = int(line.c.x); + p1.y = int(line.c.y); + p2.x = int(line.c.x - line.s.x * len); + p2.y = int(line.c.y - line.s.y * len); + cv::line(image, p1, p2, color); } -void DrawPoints(IplImage* image, const CvSeq* contour, CvScalar color) +void DrawPoints(cv::Mat& image, const std::vector& contour, + const cv::Scalar& color) { - for(int i = 0; i < contour->total; ++i) - { - CvPoint* pt = (CvPoint*)cvGetSeqElem( contour, i); - cvLine(image, cvPoint(pt->x, pt->y), cvPoint(pt->x, pt->y), color); - } + for (const auto& pt : contour) + { + cv::line(image, cv::Point(pt.x, pt.y), cv::Point(pt.x, pt.y), color); + } } -void DrawCircles(IplImage* image, const CvSeq* contour, int radius, CvScalar color) +void DrawCircles(cv::Mat& image, const std::vector& contour, + int radius, cv::Scalar color) { - for(int i = 0; i < contour->total; ++i) - { - CvPoint* pt = (CvPoint*)cvGetSeqElem( contour, i); - cvCircle(image, cvPoint(pt->x, pt->y), radius, color); - } + for (const auto& pt : contour) + { + cv::circle(image, cv::Point(pt.x, pt.y), radius, color); + } } -void DrawLines(IplImage* image, const CvSeq* contour, CvScalar color) +void DrawLines(cv::Mat& image, const std::vector& contour, + cv::Scalar color) { - if(contour->total >= 2) - { - for(int i = 0; i < contour->total; ++i) - { - CvPoint* pt1 = (CvPoint*)cvGetSeqElem( contour, i); - CvPoint* pt2 = (CvPoint*)cvGetSeqElem( contour, (i+1)%(contour->total)); - cvLine(image, cvPoint(pt1->x, pt1->y), cvPoint(pt2->x, pt2->y), color); - } - } + if (contour.size() >= 2) + { + for (int i = 0; i < contour.size(); ++i) + { + const auto& pt1 = contour[i]; + const auto& pt2 = contour[(i + 1) % (contour.size())]; + cv::line(image, cv::Point(pt1.x, pt1.y), cv::Point(pt2.x, pt2.y), color); + } + } } -void DrawCVEllipse(IplImage* image, CvBox2D& ellipse, CvScalar color, bool fill/*=false*/, double par) +void DrawCVEllipse(cv::Mat& image, const cv::RotatedRect& ellipse, + cv::Scalar color, bool fill /*=false*/, double par) { - CvPoint center; - center.x = static_cast(ellipse.center.x); - center.y = static_cast(ellipse.center.y); - int type = 1; - if(fill) - type = 
CV_FILLED; + cv::Point center; + center.x = static_cast(ellipse.center.x); + center.y = static_cast(ellipse.center.y); + int type = 1; + if (fill) + type = cv::FILLED; - //cout<(par+ellipse.size.width/2), static_cast(par+ellipse.size.height/2)), -ellipse.angle, 0, 360, color, type); + // cout<(par + ellipse.size.width / 2), + static_cast(par + ellipse.size.height / 2)), + -ellipse.angle, 0, 360, color, type); } -void BuildHideTexture(IplImage *image, IplImage *hide_texture, - Camera *cam, double gl_modelview[16], - PointDouble topleft, PointDouble botright) +void BuildHideTexture(cv::Mat& image, cv::Mat& hide_texture, Camera* cam, + double gl_modelview[16], PointDouble topleft, + PointDouble botright) { - assert(image->origin == 0); // Currently only top-left origin supported - double kx=1.0; - double ky=1.0; - - double width = abs(botright.x - topleft.x); - double height = abs(botright.y - topleft.y); - - //GLint vp[4]; //viewport - //GLdouble winx[8]; // point's coordinates in windowcoordinates - //GLdouble winy[8]; - //GLdouble winz[8]; - double objx; - double objy; - //GLdouble objz; - unsigned char pixels[8][3]; - unsigned char color[3]={0,0,0}; + double kx = 1.0; + double ky = 1.0; + + double width = abs(botright.x - topleft.x); + double height = abs(botright.y - topleft.y); - int i=0,j=0,t=0; - double ox,oy,ya,yb,xc,xd,offset; - double sizex = width/4, size2x=width/2; - double sizey = height/4, size2y=height/2; + // GLint vp[4]; //viewport + // GLdouble winx[8]; // point's coordinates in windowcoordinates + // GLdouble winy[8]; + // GLdouble winz[8]; + double objx; + double objy; + // GLdouble objz; + unsigned char pixels[8][3]; + unsigned char color[3] = { 0, 0, 0 }; - // Calculate extended coordinates of detected marker (+ border) - objx = width/2*kx; - objy = height/2*ky; + int i = 0, j = 0, t = 0; + double ox, oy, ya, yb, xc, xd, offset; + double sizex = width / 4, size2x = width / 2; + double sizey = height / 4, size2y = height / 2; - //cout<width<<","<height<width,ystep=2*objy/hide_texture->height; - for(i=0;iwidth;i++){ - ox = -objx+i*xstep; - offset = fmod((objx-ox), size2x); - if( offset < sizex) - xc = objx + offset; - else - xc = objx+size2x-offset; - offset = fmod((objx+ox), size2x); - if( offset < sizex) - xd = -objx - offset; - else - xd = -objx-size2x+offset; - r=(ox+objx); - for(j=0;jheight;j++){ - oy = -objy+j*ystep; - offset = fmod((objy-oy), size2y); - if( offset < sizey) - ya = objy + offset; - else - ya = objy+size2y-offset; - offset = fmod((oy+objy), size2y); - if( offset < sizey) - yb = -objy - offset; - else - yb = -objy-size2y+offset; - s=(oy+objy); + // Calculate extended coordinates of detected marker (+ border) + objx = width / 2 * kx; + objy = height / 2 * ky; - double points3d[4][3] = { - ox, ya, 0, - ox, yb, 0, - xc, oy, 0, - xd, oy, 0, - }; - double points2d[4][2]; - CvMat points3d_mat, points2d_mat; - cvInitMatHeader(&points3d_mat, 4, 3, CV_64F, points3d); - cvInitMatHeader(&points2d_mat, 4, 2, CV_64F, points2d); - cam->ProjectPoints(&points3d_mat, gl_modelview, &points2d_mat); - int kuvanx4 = (int)Limit(points2d[0][0], 0, image->width-1); int kuvany4 = (int)Limit(points2d[0][1], 0, image->height-1); - int kuvanx5 = (int)Limit(points2d[1][0], 0, image->width-1); int kuvany5 = (int)Limit(points2d[1][1], 0, image->height-1); - int kuvanx6 = (int)Limit(points2d[2][0], 0, image->width-1); int kuvany6 = (int)Limit(points2d[2][1], 0, image->height-1); - int kuvanx7 = (int)Limit(points2d[3][0], 0, image->width-1); int kuvany7 = (int)Limit(points2d[3][1], 0, 
image->height-1); + // cout<width<<","<height<hide_texture->width-w)|(j>hide_texture->width-w)) - opaque=60; - else if ((i<2*w)|(j<2*w)|(i>hide_texture->width-2*w)|(j>hide_texture->width-2*w)) - opaque=100; - else if ((i<3*w)|(j<3*w)|(i>hide_texture->width-3*w)|(j>hide_texture->width-3*w)) - opaque=140; - else if ((i<4*w)|(j<4*w)|(i>hide_texture->width-4*w)|(j>hide_texture->width-4*w)) - opaque=200; - else - opaque=255; - - cvSet2D(hide_texture, j, i, cvScalar( - (((lr-r)*pixels[7][0] + r*pixels[6][0]+ s* pixels[4][0] + (ls-s)* pixels[5][0])/l2r), - (((lr-r)*pixels[7][1] + r*pixels[6][1]+ s* pixels[4][1] + (ls-s)* pixels[5][1])/l2r), - (((lr-r)*pixels[7][2] + r*pixels[6][2]+ s* pixels[4][2] + (ls-s)* pixels[5][2])/l2r), - opaque - )); - } - } + double points3d[4][3] = { + ox, ya, 0, ox, yb, 0, xc, oy, 0, xd, oy, 0, + }; + double points2d[4][2]; + cv::Mat points3d_mat, points2d_mat; + points3d_mat = cv::Mat(4, 3, CV_64F, points3d); + points2d_mat = cv::Mat(4, 2, CV_64F, points2d); + cam->ProjectPoints(points3d_mat, gl_modelview, points2d_mat); + int kuvanx4 = (int)Limit(points2d[0][0], 0, image.cols - 1); + int kuvany4 = (int)Limit(points2d[0][1], 0, image.rows - 1); + int kuvanx5 = (int)Limit(points2d[1][0], 0, image.cols - 1); + int kuvany5 = (int)Limit(points2d[1][1], 0, image.rows - 1); + int kuvanx6 = (int)Limit(points2d[2][0], 0, image.cols - 1); + int kuvany6 = (int)Limit(points2d[2][1], 0, image.rows - 1); + int kuvanx7 = (int)Limit(points2d[3][0], 0, image.cols - 1); + int kuvany7 = (int)Limit(points2d[3][1], 0, image.rows - 1); + + pixels[4][0] = (unsigned char)image.at(kuvany4, kuvanx4)[0]; + pixels[4][1] = (unsigned char)image.at(kuvany4, kuvanx4)[1]; + pixels[4][2] = (unsigned char)image.at(kuvany4, kuvanx4)[2]; + pixels[5][0] = (unsigned char)image.at(kuvany5, kuvanx5)[0]; + pixels[5][1] = (unsigned char)image.at(kuvany5, kuvanx5)[1]; + pixels[5][2] = (unsigned char)image.at(kuvany5, kuvanx5)[2]; + pixels[6][0] = (unsigned char)image.at(kuvany6, kuvanx6)[0]; + pixels[6][1] = (unsigned char)image.at(kuvany6, kuvanx6)[1]; + pixels[6][2] = (unsigned char)image.at(kuvany6, kuvanx6)[2]; + pixels[7][0] = (unsigned char)image.at(kuvany7, kuvanx7)[0]; + pixels[7][1] = (unsigned char)image.at(kuvany7, kuvanx7)[1]; + pixels[7][2] = (unsigned char)image.at(kuvany7, kuvanx7)[2]; + + // make the borders of the texture partly transparent + int opaque; + const int w = 1; + if ((i < w) | (j < w) | (i > hide_texture.cols - w) | + (j > hide_texture.cols - w)) + opaque = 60; + else if ((i < 2 * w) | (j < 2 * w) | (i > hide_texture.cols - 2 * w) | + (j > hide_texture.cols - 2 * w)) + opaque = 100; + else if ((i < 3 * w) | (j < 3 * w) | (i > hide_texture.cols - 3 * w) | + (j > hide_texture.cols - 3 * w)) + opaque = 140; + else if ((i < 4 * w) | (j < 4 * w) | (i > hide_texture.cols - 4 * w) | + (j > hide_texture.cols - 4 * w)) + opaque = 200; + else + opaque = 255; + hide_texture.at(j, i) = + cv::Vec4b((((lr - r) * pixels[7][0] + r * pixels[6][0] + + s * pixels[4][0] + (ls - s) * pixels[5][0]) / + l2r), + (((lr - r) * pixels[7][1] + r * pixels[6][1] + + s * pixels[4][1] + (ls - s) * pixels[5][1]) / + l2r), + (((lr - r) * pixels[7][2] + r * pixels[6][2] + + s * pixels[4][2] + (ls - s) * pixels[5][2]) / + l2r), + opaque); + } + } } -void DrawTexture(IplImage *image, IplImage *texture, - Camera *cam, double gl_modelview[16], - PointDouble topleft, PointDouble botright) +void DrawTexture(cv::Mat& image, cv::Mat& texture, Camera* cam, + double gl_modelview[16], PointDouble topleft, + PointDouble 
botright) { - assert(image->origin == 0); // Currently only top-left origin supported - double width = abs(botright.x - topleft.x); - double height = abs(botright.y - topleft.y); - double objx = width/2; - double objy = height/2; - - // Project corners - double points3d[4][3] = { - -objx, -objy, 0, - -objx, objy, 0, - objx, objy, 0, - objx, -objy, 0, - }; - double points2d[4][2]; - CvMat points3d_mat, points2d_mat; - cvInitMatHeader(&points3d_mat, 4, 3, CV_64F, points3d); - cvInitMatHeader(&points2d_mat, 4, 2, CV_64F, points2d); - cam->ProjectPoints(&points3d_mat, gl_modelview, &points2d_mat); - - // Warp texture and mask using the perspective that is based on the corners - double map[9]; - CvMat map_mat = cvMat(3, 3, CV_64F, map); - CvPoint2D32f src[4] = { - { 0, 0 }, - { 0, float(texture->height-1) }, - { float(texture->width-1), float(texture->height-1) }, - { float(texture->width-1), 0 }, - }; - CvPoint2D32f dst[4] = { - { float(points2d[0][0]), float(points2d[0][1]) }, - { float(points2d[1][0]), float(points2d[1][1]) }, - { float(points2d[2][0]), float(points2d[2][1]) }, - { float(points2d[3][0]), float(points2d[3][1]) }, - }; - cvGetPerspectiveTransform(src, dst, &map_mat); - IplImage *img = cvCloneImage(image); - IplImage *img2 = cvCloneImage(image); - IplImage *mask = cvCreateImage(cvSize(image->width, image->height), 8, 1); - IplImage *mask2 = cvCreateImage(cvSize(image->width, image->height), 8, 1); - cvZero(img); - cvZero(img2); - cvZero(mask); - cvZero(mask2); - for (int j=0; jheight; j++) { //ttesis: why must we copy the texture first? - for (int i=0; iwidth; i++) { - CvScalar s = cvGet2D(texture, j, i); - cvSet2D(img, j, i, s); - if ((i>0) && (j>0) && (i<(texture->width-1)) && (j<(texture->height-1))) - cvSet2D(mask, j, i, cvScalar(1)); //ttesis: why are edges not included? - } - } - cvWarpPerspective(img, img2, &map_mat); - cvWarpPerspective(mask, mask2, &map_mat, 0); - - cvCopy(img2, image, mask2); + double width = abs(botright.x - topleft.x); + double height = abs(botright.y - topleft.y); + double objx = width / 2; + double objy = height / 2; + + // Project corners + double points3d[4][3] = { + -objx, -objy, 0, -objx, objy, 0, objx, objy, 0, objx, -objy, 0, + }; + double points2d[4][2]; + cv::Mat points3d_mat, points2d_mat; + points3d_mat = cv::Mat(4, 3, CV_64F, points3d); + points2d_mat = cv::Mat(4, 2, CV_64F, points2d); + cam->ProjectPoints(points3d_mat, gl_modelview, points2d_mat); + + // Warp texture and mask using the perspective that is based on the corners + double map[9]; + cv::Mat map_mat = cv::Mat(3, 3, CV_64F, map); + cv::Point2f src[4] = { + { 0, 0 }, + { 0, float(texture.rows - 1) }, + { float(texture.cols - 1), float(texture.rows - 1) }, + { float(texture.cols - 1), 0 }, + }; + cv::Point2f dst[4] = { + { float(points2d[0][0]), float(points2d[0][1]) }, + { float(points2d[1][0]), float(points2d[1][1]) }, + { float(points2d[2][0]), float(points2d[2][1]) }, + { float(points2d[3][0]), float(points2d[3][1]) }, + }; + map_mat = cv::getPerspectiveTransform(src, dst); + cv::Mat img = image.clone(); + cv::Mat img2 = image.clone(); + cv::Mat mask = cv::Mat::zeros(cv::Size(image.cols, image.rows), CV_8UC1); + cv::Mat mask2 = cv::Mat::zeros(cv::Size(image.cols, image.rows), CV_8UC1); + img.setTo(cv::Scalar::all(0)); + img2.setTo(cv::Scalar::all(0)); + for (int j = 0; j < texture.rows; j++) + { // ttesis: why must we copy the texture first? 
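+    // copy each texture pixel; the mask marks everything except the one-pixel border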
+ for (int i = 0; i < texture.cols; i++) + { + cv::Vec3b s = texture.at<cv::Vec3b>(j, i); + img.at<cv::Vec3b>(j, i) = s; + if ((i > 0) && (j > 0) && (i < (texture.cols - 1)) && + (j < (texture.rows - 1))) + mask.at<uchar>(j, i) = 1; // ttesis: why are edges not included? + } + } + cv::warpPerspective(img, img2, map_mat, img2.size()); + cv::warpPerspective(mask, mask2, map_mat, img2.size(), 0); + + img2.copyTo(image, mask2); - cvReleaseImage(&img); - cvReleaseImage(&img2); - cvReleaseImage(&mask); - cvReleaseImage(&mask2); + img.release(); + img2.release(); + mask.release(); + mask2.release(); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/EC.cpp b/ar_track_alvar/src/EC.cpp index 946d8da..e33a814 100644 --- a/ar_track_alvar/src/EC.cpp +++ b/ar_track_alvar/src/EC.cpp @@ -24,258 +24,287 @@ #include "EC.h" #include "Optimization.h" -namespace alvar { - +namespace alvar +{ struct ProjectParams { - Camera *camera; - const CvMat *object_model; + Camera* camera; + const CvMat* object_model; }; -static void ProjectRot(CvMat* state, CvMat* projection, void* x) +static void ProjectRot(CvMat* state, CvMat* projection, void* x) { - ProjectParams *foo = (ProjectParams*)x; - Camera *camera = foo->camera; - const CvMat *object_model = foo->object_model; - int count = projection->rows; - CvMat rot_mat = cvMat(3, 1, CV_64F, &(state->data.db[0+0])); - double zeros[3] = {0}; - CvMat zero_tra = cvMat(3, 1, CV_64F, zeros); - cvReshape(projection, projection, 2, 1); - cvProjectPoints2(object_model, &rot_mat, &zero_tra, &(camera->calib_K), &(camera->calib_D), projection); - cvReshape(projection, projection, 1, count); + ProjectParams* foo = (ProjectParams*)x; + Camera* camera = foo->camera; + const CvMat* object_model = foo->object_model; + int count = projection->rows; + CvMat rot_mat = cvMat(3, 1, CV_64F, &(state->data.db[0 + 0])); + double zeros[3] = { 0 }; + CvMat zero_tra = cvMat(3, 1, CV_64F, zeros); + cvReshape(projection, projection, 2, 1); + cvProjectPoints2(object_model, &rot_mat, &zero_tra, &(camera->calib_K), + &(camera->calib_D), projection); + cvReshape(projection, projection, 1, count); } // TODO: How this differs from the Camera::ProjectPoints ??? 
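On the TODO above: ProjectRot differs from Camera::ProjectPoints only in forcing a zero translation, so the optimizer can refine orientation alone. In OpenCV's current API the whole helper collapses to a single `cv::projectPoints` call; a hedged sketch, where `K` and `dist` stand in for the camera's `calib_K` / `calib_D`:

```
#include <opencv2/calib3d.hpp>
#include <vector>

// Rotation-only projection: cvProjectPoints2 with a zero translation
// becomes cv::projectPoints with tvec = (0, 0, 0).
std::vector<cv::Point2d> projectRotationOnly(
    const std::vector<cv::Point3d>& model, const cv::Vec3d& rvec,
    const cv::Matx33d& K, const cv::Vec<double, 5>& dist)
{
  std::vector<cv::Point2d> projected;
  cv::projectPoints(model, rvec, cv::Vec3d(0.0, 0.0, 0.0), K, dist, projected);
  return projected;
}
```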
static void Project(CvMat* state, CvMat* projection, void* x) { - ProjectParams *foo = (ProjectParams*)x; - Camera *camera = foo->camera; - const CvMat *object_model = foo->object_model; - int count = projection->rows; - CvMat rot_mat = cvMat(3, 1, CV_64F, &(state->data.db[0+0])); - CvMat tra_mat = cvMat(3, 1, CV_64F, &(state->data.db[0+3])); - cvReshape(projection, projection, 2, 1); - cvProjectPoints2(object_model, &rot_mat, &tra_mat, &(camera->calib_K), &(camera->calib_D), projection); - cvReshape(projection, projection, 1, count); + ProjectParams* foo = (ProjectParams*)x; + Camera* camera = foo->camera; + const CvMat* object_model = foo->object_model; + int count = projection->rows; + CvMat rot_mat = cvMat(3, 1, CV_64F, &(state->data.db[0 + 0])); + CvMat tra_mat = cvMat(3, 1, CV_64F, &(state->data.db[0 + 3])); + cvReshape(projection, projection, 2, 1); + cvProjectPoints2(object_model, &rot_mat, &tra_mat, &(camera->calib_K), + &(camera->calib_D), projection); + cvReshape(projection, projection, 1, count); } -bool CameraEC::UpdatePose(const CvMat* object_points, CvMat* image_points, Pose *pose, CvMat *weights) { - double rot[3]; CvMat rotm = cvMat(3, 1, CV_64F, rot); - double tra[3]; CvMat tram = cvMat(3, 1, CV_64F, tra); - pose->GetRodriques(&rotm); - pose->GetTranslation(&tram); - bool ret = UpdatePose(object_points, image_points, &rotm, &tram, weights); - pose->SetRodriques(&rotm); - pose->SetTranslation(&tram); - return ret; +bool CameraEC::UpdatePose(const CvMat* object_points, CvMat* image_points, + Pose* pose, CvMat* weights) +{ + double rot[3]; + CvMat rotm = cvMat(3, 1, CV_64F, rot); + double tra[3]; + CvMat tram = cvMat(3, 1, CV_64F, tra); + pose->GetRodriques(&rotm); + pose->GetTranslation(&tram); + bool ret = UpdatePose(object_points, image_points, &rotm, &tram, weights); + pose->SetRodriques(&rotm); + pose->SetTranslation(&tram); + return ret; } -bool CameraEC::UpdatePose(const CvMat* object_points, CvMat* image_points, CvMat *rot, CvMat *tra, CvMat *weights) { - if (object_points->height < 4) return false; - /* if (object_points->height < 6) { - return false; - // TODO: We need to change image_points into CV_32FC2 - return Camera::CalcExteriorOrientation(object_points, image_points, rot, tra); - }*/ - CvMat* par = cvCreateMat(6, 1, CV_64F); - memcpy(&(par->data.db[0+0]), rot->data.db, 3*sizeof(double)); - memcpy(&(par->data.db[0+3]), tra->data.db, 3*sizeof(double)); - - ProjectParams pparams; - pparams.camera = this; - pparams.object_model = object_points; - - alvar::Optimization *opt = new alvar::Optimization(6, image_points->height); - double tmp = opt->Optimize(par, image_points, 0.0005, 2, Project, &pparams, alvar::Optimization::TUKEY_LM, 0, 0, weights); - - memcpy(rot->data.db, &(par->data.db[0+0]), 3*sizeof(double)); - memcpy(tra->data.db, &(par->data.db[0+3]), 3*sizeof(double)); - - delete opt; - - cvReleaseMat(&par); - return true; +bool CameraEC::UpdatePose(const CvMat* object_points, CvMat* image_points, + CvMat* rot, CvMat* tra, CvMat* weights) +{ + if (object_points->height < 4) + return false; + /* if (object_points->height < 6) { + return false; + // TODO: We need to change image_points into CV_32FC2 + return Camera::CalcExteriorOrientation(object_points, image_points, rot, + tra); + }*/ + CvMat* par = cvCreateMat(6, 1, CV_64F); + memcpy(&(par->data.db[0 + 0]), rot->data.db, 3 * sizeof(double)); + memcpy(&(par->data.db[0 + 3]), tra->data.db, 3 * sizeof(double)); + + ProjectParams pparams; + pparams.camera = this; + pparams.object_model = object_points; + + 
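The Optimization call that follows refines all six pose parameters with a Tukey-weighted Levenberg-Marquardt fit, which is what makes UpdatePose tolerant of a few bad correspondences. For readers who only need the end result rather than ALVAR's own optimizer, stock OpenCV reaches a comparable outlier-tolerant pose via `cv::solvePnPRansac`; this is a sketch of that alternative, not the package's code path:

```
#include <opencv2/calib3d.hpp>
#include <vector>

// Robust exterior orientation from 2D-3D correspondences. Not what
// UpdatePose runs internally (ALVAR's Tukey/LM optimizer); a stock
// OpenCV sketch of the same goal, assuming calibration K and dist.
bool robustPose(const std::vector<cv::Point3d>& object,
                const std::vector<cv::Point2d>& image,
                const cv::Matx33d& K, const cv::Vec<double, 5>& dist,
                cv::Mat& rvec, cv::Mat& tvec)
{
  if (object.size() < 4)
    return false;  // same minimum point count as UpdatePose above
  std::vector<int> inliers;
  return cv::solvePnPRansac(object, image, K, dist, rvec, tvec,
                            /*useExtrinsicGuess=*/false,
                            /*iterationsCount=*/100,
                            /*reprojectionError=*/2.0f,
                            /*confidence=*/0.99, inliers);
}
```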
alvar::Optimization* opt = new alvar::Optimization(6, image_points->height); + double tmp = opt->Optimize(par, image_points, 0.0005, 2, Project, &pparams, + alvar::Optimization::TUKEY_LM, 0, 0, weights); + + memcpy(rot->data.db, &(par->data.db[0 + 0]), 3 * sizeof(double)); + memcpy(tra->data.db, &(par->data.db[0 + 3]), 3 * sizeof(double)); + + delete opt; + + cvReleaseMat(&par); + return true; } -bool CameraEC::UpdateRotation(const CvMat* object_points, CvMat* image_points, Pose *pose) +bool CameraEC::UpdateRotation(const CvMat* object_points, CvMat* image_points, + Pose* pose) { - double rot[3]; CvMat rotm = cvMat(3, 1, CV_64F, rot); - double tra[3]; CvMat tram = cvMat(3, 1, CV_64F, tra); - pose->GetRodriques(&rotm); - pose->GetTranslation(&tram); - bool ret = UpdateRotation(object_points, image_points, &rotm, &tram); - pose->SetRodriques(&rotm); - pose->SetTranslation(&tram); - return ret; + double rot[3]; + CvMat rotm = cvMat(3, 1, CV_64F, rot); + double tra[3]; + CvMat tram = cvMat(3, 1, CV_64F, tra); + pose->GetRodriques(&rotm); + pose->GetTranslation(&tram); + bool ret = UpdateRotation(object_points, image_points, &rotm, &tram); + pose->SetRodriques(&rotm); + pose->SetTranslation(&tram); + return ret; } -bool CameraEC::UpdateRotation(const CvMat* object_points, CvMat* image_points, CvMat *rot, CvMat *tra) { - - CvMat* par = cvCreateMat(3, 1, CV_64F); - memcpy(&(par->data.db[0+0]), rot->data.db, 3*sizeof(double)); - ProjectParams pparams; - pparams.camera = this; - pparams.object_model = object_points; - alvar::Optimization *opt = new alvar::Optimization(3, image_points->height); - double tmp = opt->Optimize(par, image_points, 0.0005, 2, ProjectRot, &pparams, alvar::Optimization::TUKEY_LM); - memcpy(rot->data.db, &(par->data.db[0+0]), 3*sizeof(double)); - delete opt; - cvReleaseMat(&par); - return true; +bool CameraEC::UpdateRotation(const CvMat* object_points, CvMat* image_points, + CvMat* rot, CvMat* tra) +{ + CvMat* par = cvCreateMat(3, 1, CV_64F); + memcpy(&(par->data.db[0 + 0]), rot->data.db, 3 * sizeof(double)); + ProjectParams pparams; + pparams.camera = this; + pparams.object_model = object_points; + alvar::Optimization* opt = new alvar::Optimization(3, image_points->height); + double tmp = opt->Optimize(par, image_points, 0.0005, 2, ProjectRot, &pparams, + alvar::Optimization::TUKEY_LM); + memcpy(rot->data.db, &(par->data.db[0 + 0]), 3 * sizeof(double)); + delete opt; + cvReleaseMat(&par); + return true; } // Ol etta mirror asia on kunnossa void GetOrigo(Pose* pose, CvMat* O) { - pose->GetTranslation(O); + pose->GetTranslation(O); } -void GetPointOnLine(const Pose* pose, Camera *camera, const CvPoint2D32f *u, CvMat* P) +void GetPointOnLine(const Pose* pose, Camera* camera, const CvPoint2D32f* u, + CvMat* P) { - double kid[9], rotd[9], trad[3], ud[3] = {u->x, u->y, 1}; - CvMat Ki = cvMat(3, 3, CV_64F, kid); - CvMat R = cvMat(3, 3, CV_64F, rotd); - CvMat T = cvMat(3, 1, CV_64F, trad); - CvMat U = cvMat(3, 1, CV_64F, ud); - pose->GetMatrix(&R); - pose->GetTranslation(&T); - cvInv(&(camera->calib_K), &Ki); - cvMatMul(&R, &Ki, &Ki); - cvGEMM(&Ki, &U, 1, &T, 1, P, 0); + double kid[9], rotd[9], trad[3], ud[3] = { u->x, u->y, 1 }; + CvMat Ki = cvMat(3, 3, CV_64F, kid); + CvMat R = cvMat(3, 3, CV_64F, rotd); + CvMat T = cvMat(3, 1, CV_64F, trad); + CvMat U = cvMat(3, 1, CV_64F, ud); + pose->GetMatrix(&R); + pose->GetTranslation(&T); + cvInv(&(camera->calib_K), &Ki); + cvMatMul(&R, &Ki, &Ki); + cvGEMM(&Ki, &U, 1, &T, 1, P, 0); } -bool MidPointAlgorithm(CvMat* o1, CvMat* o2, CvMat* p1, 
CvMat* p2, CvPoint3D32f& X, double limit) +bool MidPointAlgorithm(CvMat* o1, CvMat* o2, CvMat* p1, CvMat* p2, + CvPoint3D32f& X, double limit) { - double ud[3], vd[3], wd[3]; - CvMat u = cvMat(3, 1, CV_64F, ud); - CvMat v = cvMat(3, 1, CV_64F, vd); - CvMat w = cvMat(3, 1, CV_64F, wd); - - cvSub(p1, o1, &u); - cvSub(p2, o2, &v); - cvSub(o1, o2, &w); - - double a = cvDotProduct(&u, &u); - double b = cvDotProduct(&u, &v); - double c = cvDotProduct(&v, &v); - double d = cvDotProduct(&u, &w); - double e = cvDotProduct(&v, &w); - double D = a*c - b*b; - double sc, tc; - - // compute the line parameters of the two closest points - if (D < limit) - { - return false; - // the lines are almost parallel - sc = 0.0; - tc = (b>c ? d/b : e/c); // use the largest denominator - } - else - { - sc = (b*e - c*d) / D; - tc = (a*e - b*d) / D; - } - - double m1d[3], m2d[3]; - CvMat m1 = cvMat(3, 1, CV_64F, m1d); - CvMat m2 = cvMat(3, 1, CV_64F, m2d); - cvAddWeighted(&u, sc, o1, 1.0, 0.0, &m1); - cvAddWeighted(&v, tc, o2, 1.0, 0.0, &m2); - cvAddWeighted(&m1, 0.5, &m2, 0.5, 0.0, &m1); - - X.x = (float)m1d[0]; - X.y = (float)m1d[1]; - X.z = (float)m1d[2]; - - return true; + double ud[3], vd[3], wd[3]; + CvMat u = cvMat(3, 1, CV_64F, ud); + CvMat v = cvMat(3, 1, CV_64F, vd); + CvMat w = cvMat(3, 1, CV_64F, wd); + + cvSub(p1, o1, &u); + cvSub(p2, o2, &v); + cvSub(o1, o2, &w); + + double a = cvDotProduct(&u, &u); + double b = cvDotProduct(&u, &v); + double c = cvDotProduct(&v, &v); + double d = cvDotProduct(&u, &w); + double e = cvDotProduct(&v, &w); + double D = a * c - b * b; + double sc, tc; + + // compute the line parameters of the two closest points + if (D < limit) + { + return false; + // the lines are almost parallel + sc = 0.0; + tc = (b > c ? d / b : e / c); // use the largest denominator + } + else + { + sc = (b * e - c * d) / D; + tc = (a * e - b * d) / D; + } + + double m1d[3], m2d[3]; + CvMat m1 = cvMat(3, 1, CV_64F, m1d); + CvMat m2 = cvMat(3, 1, CV_64F, m2d); + cvAddWeighted(&u, sc, o1, 1.0, 0.0, &m1); + cvAddWeighted(&v, tc, o2, 1.0, 0.0, &m2); + cvAddWeighted(&m1, 0.5, &m2, 0.5, 0.0, &m1); + + X.x = (float)m1d[0]; + X.y = (float)m1d[1]; + X.z = (float)m1d[2]; + + return true; } // todo -bool CameraEC::ReconstructFeature(const Pose *pose1, const Pose *pose2, const CvPoint2D32f *u1, const CvPoint2D32f *u2, CvPoint3D32f *p3d, double limit) { - double o1d[3], o2d[3], p1d[3], p2d[3]; - CvMat o1 = cvMat(3, 1, CV_64F, o1d); - CvMat o2 = cvMat(3, 1, CV_64F, o2d); - CvMat p1 = cvMat(3, 1, CV_64F, p1d); - CvMat p2 = cvMat(3, 1, CV_64F, p2d); - - Pose po1 = *pose1; // Make copy so that we don't destroy the pose content - Pose po2 = *pose2; - po1.Invert(); - po2.Invert(); - GetOrigo(&po1, &o1); - GetOrigo(&po2, &o2); - GetPointOnLine(&po1, this, u1, &p1); - GetPointOnLine(&po2, this, u2, &p2); - - return MidPointAlgorithm(&o1, &o2, &p1, &p2, *p3d, limit); -} +bool CameraEC::ReconstructFeature(const Pose* pose1, const Pose* pose2, + const CvPoint2D32f* u1, + const CvPoint2D32f* u2, CvPoint3D32f* p3d, + double limit) +{ + double o1d[3], o2d[3], p1d[3], p2d[3]; + CvMat o1 = cvMat(3, 1, CV_64F, o1d); + CvMat o2 = cvMat(3, 1, CV_64F, o2d); + CvMat p1 = cvMat(3, 1, CV_64F, p1d); + CvMat p2 = cvMat(3, 1, CV_64F, p2d); + + Pose po1 = *pose1; // Make copy so that we don't destroy the pose content + Pose po2 = *pose2; + po1.Invert(); + po2.Invert(); + GetOrigo(&po1, &o1); + GetOrigo(&po2, &o2); + GetPointOnLine(&po1, this, u1, &p1); + GetPointOnLine(&po2, this, u2, &p2); -void CameraEC::Get3dOnPlane(const Pose *pose, 
CvPoint2D32f p2d, CvPoint3D32f &p3d) { - double pd[16], md[9], kd[9]; - CvMat P = cvMat(4, 4, CV_64F, pd); - CvMat H = cvMat(3, 3, CV_64F, md); - CvMat Ki = cvMat(3, 3, CV_64F, kd); - - pose->GetMatrix(&P); - cvInv(&(calib_K), &Ki); - - // Construct homography from pose - int ind_s = 0, ind_c = 0; - for(int i = 0; i < 3; ++i) - { - CvRect r; r.x = ind_s; r.y = 0; r.height = 3; r.width = 1; - CvMat sub = cvMat(3, 1, CV_64F); - cvGetSubRect(&P, &sub, r); - CvMat col = cvMat(3, 1, CV_64F); - cvGetCol(&H, &col, ind_c); - cvCopy(&sub, &col); - ind_c++; - ind_s++; - if(i == 1) ind_s++; - } - - // Apply H to get the 3D coordinates - Camera::Undistort(p2d); - double xd[3] = {p2d.x, p2d.y, 1}; - CvMat X = cvMat(3, 1, CV_64F, xd); - cvMatMul(&Ki, &X, &X); - cvInv(&H, &H); - cvMatMul(&H, &X, &X); - - p3d.x = (float)(xd[0] / xd[2]); - p3d.y = (float)(xd[1] / xd[2]); - p3d.z = 0; + return MidPointAlgorithm(&o1, &o2, &p1, &p2, *p3d, limit); } -void CameraEC::Get3dOnDepth(const Pose *pose, CvPoint2D32f p2d, float depth, CvPoint3D32f &p3d) +void CameraEC::Get3dOnPlane(const Pose* pose, CvPoint2D32f p2d, + CvPoint3D32f& p3d) { - double wx, wy, wz; - Camera::Undistort(p2d); - - // Tassa asetetaan z = inf ja lasketaan x ja y jotenkin?!? - //double object_scale = _dist_to_target; // TODO Same as the pose?!?!?!? - - // inv(K)*[u v 1]'*scale - wx = depth*(p2d.x-calib_K_data[0][2])/calib_K_data[0][0]; - wy = depth*(p2d.y-calib_K_data[1][2])/calib_K_data[1][1]; - wz = depth; - - // Now the points are in camera coordinate frame. - alvar::Pose p = *pose; - p.Invert(); - - double Xd[4] = {wx, wy, wz, 1}; - CvMat Xdm = cvMat(4, 1, CV_64F, Xd); - double Pd[16]; - CvMat Pdm = cvMat(4, 4, CV_64F, Pd); - p.GetMatrix(&Pdm); - cvMatMul(&Pdm, &Xdm, &Xdm); - p3d.x = float(Xd[0]/Xd[3]); - p3d.y = float(Xd[1]/Xd[3]); - p3d.z = float(Xd[2]/Xd[3]); + double pd[16], md[9], kd[9]; + CvMat P = cvMat(4, 4, CV_64F, pd); + CvMat H = cvMat(3, 3, CV_64F, md); + CvMat Ki = cvMat(3, 3, CV_64F, kd); + + pose->GetMatrix(&P); + cvInv(&(calib_K), &Ki); + + // Construct homography from pose + int ind_s = 0, ind_c = 0; + for (int i = 0; i < 3; ++i) + { + CvRect r; + r.x = ind_s; + r.y = 0; + r.height = 3; + r.width = 1; + CvMat sub = cvMat(3, 1, CV_64F); + cvGetSubRect(&P, &sub, r); + CvMat col = cvMat(3, 1, CV_64F); + cvGetCol(&H, &col, ind_c); + cvCopy(&sub, &col); + ind_c++; + ind_s++; + if (i == 1) + ind_s++; + } + + // Apply H to get the 3D coordinates + Camera::Undistort(p2d); + double xd[3] = { p2d.x, p2d.y, 1 }; + CvMat X = cvMat(3, 1, CV_64F, xd); + cvMatMul(&Ki, &X, &X); + cvInv(&H, &H); + cvMatMul(&H, &X, &X); + + p3d.x = (float)(xd[0] / xd[2]); + p3d.y = (float)(xd[1] / xd[2]); + p3d.z = 0; } -} // namespace alvar +void CameraEC::Get3dOnDepth(const Pose* pose, CvPoint2D32f p2d, float depth, + CvPoint3D32f& p3d) +{ + double wx, wy, wz; + Camera::Undistort(p2d); + + // Tassa asetetaan z = inf ja lasketaan x ja y jotenkin?!? + // double object_scale = _dist_to_target; // TODO Same as the pose?!?!?!? + + // inv(K)*[u v 1]'*scale + wx = depth * (p2d.x - calib_K_data[0][2]) / calib_K_data[0][0]; + wy = depth * (p2d.y - calib_K_data[1][2]) / calib_K_data[1][1]; + wz = depth; + + // Now the points are in camera coordinate frame. 
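The lines that follow invert the pose to carry that camera-frame point into the world frame. Spelled out as standalone arithmetic, Get3dOnDepth is the classic pinhole back-projection; a compact sketch assuming an already undistorted pixel (the port calls Camera::Undistort first) and a 4x4 world-to-camera matrix `T`:

```
#include <opencv2/core.hpp>

// Lift pixel (u, v) to a camera-frame point at the given depth, then
// move it into the world frame with the inverted camera pose.
cv::Point3f backProject(float u, float v, float depth,
                        const cv::Matx33d& K, const cv::Matx44d& T)
{
  // Camera frame: X = depth * (u - cx) / fx, Y = depth * (v - cy) / fy.
  const double x = depth * (u - K(0, 2)) / K(0, 0);
  const double y = depth * (v - K(1, 2)) / K(1, 1);
  // T maps world -> camera, so its inverse maps the point back to world.
  cv::Vec4d pw = T.inv() * cv::Vec4d(x, y, depth, 1.0);
  return cv::Point3f(float(pw[0] / pw[3]), float(pw[1] / pw[3]),
                     float(pw[2] / pw[3]));
}
```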
+ alvar::Pose p = *pose; + p.Invert(); + + double Xd[4] = { wx, wy, wz, 1 }; + CvMat Xdm = cvMat(4, 1, CV_64F, Xd); + double Pd[16]; + CvMat Pdm = cvMat(4, 4, CV_64F, Pd); + p.GetMatrix(&Pdm); + cvMatMul(&Pdm, &Xdm, &Xdm); + p3d.x = float(Xd[0] / Xd[3]); + p3d.y = float(Xd[1] / Xd[3]); + p3d.z = float(Xd[2] / Xd[3]); +} +} // namespace alvar diff --git a/ar_track_alvar/src/FernImageDetector.cpp b/ar_track_alvar/src/FernImageDetector.cpp index 3e6b183..dd61402 100644 --- a/ar_track_alvar/src/FernImageDetector.cpp +++ b/ar_track_alvar/src/FernImageDetector.cpp @@ -25,17 +25,16 @@ namespace alvar { +#define PATCH_SIZE 31 +#define PYR_LEVELS 1 +#define N_VIEWS 5000 +#define N_PTS_TO_FIND 400 +#define N_PTS_TO_TEACH 200 +#define SIZE_BLUR 13 -#define PATCH_SIZE 31 -#define PYR_LEVELS 1 -#define N_VIEWS 5000 -#define N_PTS_TO_FIND 400 -#define N_PTS_TO_TEACH 200 -#define SIZE_BLUR 13 - -#define N_STRUCTS 50 -#define STRUCT_SIZE 11 -#define SIGNATURE_SIZE 400 +#define N_STRUCTS 50 +#define STRUCT_SIZE 11 +#define SIGNATURE_SIZE 400 // default opencv parameters // PATCH_SIZE = 31, @@ -55,28 +54,26 @@ namespace alvar // double _phiMin=-CV_PI, double _phiMax=CV_PI ); // Calculate random parameterized affine transformation A -// A = T(patch center) * R(theta) * R(phi)' * S(lambda1, lambda2) * R(phi) * T(-pt) +// A = T(patch center) * R(theta) * R(phi)' * S(lambda1, lambda2) * R(phi) * +// T(-pt) -FernClassifierWrapper::FernClassifierWrapper() - : FernClassifier() +FernClassifierWrapper::FernClassifierWrapper() : FernClassifier() { } -FernClassifierWrapper::FernClassifierWrapper(const FileNode &fileNode) - : FernClassifier(fileNode) +FernClassifierWrapper::FernClassifierWrapper(const FileNode& fileNode) + : FernClassifier(fileNode) { } -FernClassifierWrapper::FernClassifierWrapper(const vector > &points, - const vector &referenceImages, - const vector > &labels, - int _nclasses, int _patchSize, - int _signatureSize, int _nstructs, - int _structSize, int _nviews, - int _compressionMethod, - const PatchGenerator &patchGenerator) - : FernClassifier(points, referenceImages, labels, _nclasses, _patchSize, _signatureSize, - _nstructs, _structSize, _nviews, _compressionMethod, patchGenerator) +FernClassifierWrapper::FernClassifierWrapper( + const vector >& points, const vector& referenceImages, + const vector >& labels, int _nclasses, int _patchSize, + int _signatureSize, int _nstructs, int _structSize, int _nviews, + int _compressionMethod, const PatchGenerator& patchGenerator) + : FernClassifier(points, referenceImages, labels, _nclasses, _patchSize, + _signatureSize, _nstructs, _structSize, _nviews, + _compressionMethod, patchGenerator) { } @@ -84,439 +81,490 @@ FernClassifierWrapper::~FernClassifierWrapper() { } -void FernClassifierWrapper::readBinary(std::fstream &stream) +void FernClassifierWrapper::readBinary(std::fstream& stream) { - clear(); - - stream.read((char *)&verbose, sizeof(verbose)); - stream.read((char *)&nstructs, sizeof(nstructs)); - stream.read((char *)&structSize, sizeof(structSize)); - stream.read((char *)&nclasses, sizeof(nclasses)); - stream.read((char *)&signatureSize, sizeof(signatureSize)); - stream.read((char *)&compressionMethod, sizeof(compressionMethod)); - stream.read((char *)&leavesPerStruct, sizeof(leavesPerStruct)); - stream.read((char *)&patchSize.width, sizeof(patchSize.width)); - stream.read((char *)&patchSize.height, sizeof(patchSize.height)); - - std::vector::size_type featuresSize; - stream.read((char *)&featuresSize, sizeof(featuresSize)); - 
features.reserve(featuresSize); - unsigned int featuresValue; - Feature value; - for (std::vector::size_type i = 0; i < featuresSize; ++i) { - stream.read((char *)&featuresValue, sizeof(featuresValue)); - value.x1 = (uchar)(featuresValue % patchSize.width); - value.y1 = (uchar)(featuresValue / patchSize.width); - stream.read((char *)&featuresValue, sizeof(featuresValue)); - value.x2 = (uchar)(featuresValue % patchSize.width); - value.y2 = (uchar)(featuresValue / patchSize.width); - features.push_back(value); - } - - // don't read classCounters - /* - std::vector::size_type classCountersSize; - stream.read((char *)&classCountersSize, sizeof(classCountersSize)); - classCounters.reserve(classCountersSize); - int classCountersValue; - for (std::vector::size_type i = 0; i < classCountersSize; ++i) { - stream.read((char *)&classCountersValue, sizeof(classCountersValue)); - classCounters.push_back(classCountersValue); - } - */ - - std::vector::size_type posteriorsSize; - stream.read((char *)&posteriorsSize, sizeof(posteriorsSize)); - posteriors.reserve(posteriorsSize); - float posteriorsValue; - for (std::vector::size_type i = 0; i < posteriorsSize; ++i) { - stream.read((char *)&posteriorsValue, sizeof(posteriorsValue)); - posteriors.push_back(posteriorsValue); - } + clear(); + + stream.read((char*)&verbose, sizeof(verbose)); + stream.read((char*)&nstructs, sizeof(nstructs)); + stream.read((char*)&structSize, sizeof(structSize)); + stream.read((char*)&nclasses, sizeof(nclasses)); + stream.read((char*)&signatureSize, sizeof(signatureSize)); + stream.read((char*)&compressionMethod, sizeof(compressionMethod)); + stream.read((char*)&leavesPerStruct, sizeof(leavesPerStruct)); + stream.read((char*)&patchSize.width, sizeof(patchSize.width)); + stream.read((char*)&patchSize.height, sizeof(patchSize.height)); + + std::vector::size_type featuresSize; + stream.read((char*)&featuresSize, sizeof(featuresSize)); + features.reserve(featuresSize); + unsigned int featuresValue; + Feature value; + for (std::vector::size_type i = 0; i < featuresSize; ++i) + { + stream.read((char*)&featuresValue, sizeof(featuresValue)); + value.x1 = (uchar)(featuresValue % patchSize.width); + value.y1 = (uchar)(featuresValue / patchSize.width); + stream.read((char*)&featuresValue, sizeof(featuresValue)); + value.x2 = (uchar)(featuresValue % patchSize.width); + value.y2 = (uchar)(featuresValue / patchSize.width); + features.push_back(value); + } + + // don't read classCounters + /* + std::vector::size_type classCountersSize; + stream.read((char *)&classCountersSize, sizeof(classCountersSize)); + classCounters.reserve(classCountersSize); + int classCountersValue; + for (std::vector::size_type i = 0; i < classCountersSize; ++i) { + stream.read((char *)&classCountersValue, sizeof(classCountersValue)); + classCounters.push_back(classCountersValue); + } + */ + + std::vector::size_type posteriorsSize; + stream.read((char*)&posteriorsSize, sizeof(posteriorsSize)); + posteriors.reserve(posteriorsSize); + float posteriorsValue; + for (std::vector::size_type i = 0; i < posteriorsSize; ++i) + { + stream.read((char*)&posteriorsValue, sizeof(posteriorsValue)); + posteriors.push_back(posteriorsValue); + } } -void FernClassifierWrapper::writeBinary(std::fstream &stream) const +void FernClassifierWrapper::writeBinary(std::fstream& stream) const { - stream.write((char *)&verbose, sizeof(verbose)); - stream.write((char *)&nstructs, sizeof(nstructs)); - stream.write((char *)&structSize, sizeof(structSize)); - stream.write((char *)&nclasses, 
sizeof(nclasses)); - stream.write((char *)&signatureSize, sizeof(signatureSize)); - stream.write((char *)&compressionMethod, sizeof(compressionMethod)); - stream.write((char *)&leavesPerStruct, sizeof(leavesPerStruct)); - stream.write((char *)&patchSize.width, sizeof(patchSize.width)); - stream.write((char *)&patchSize.height, sizeof(patchSize.height)); - - std::vector::size_type featuresSize = features.size(); - stream.write((char *)&featuresSize, sizeof(featuresSize)); - unsigned int featuresValue; - for (std::vector::const_iterator itr = features.begin(); itr != features.end(); ++itr) { - featuresValue = itr->y1 * patchSize.width + itr->x1; - stream.write((char *)&featuresValue, sizeof(featuresValue)); - featuresValue = itr->y2 * patchSize.width + itr->x2; - stream.write((char *)&featuresValue, sizeof(featuresValue)); - } - - // don't write classCounters - /* - std::vector::size_type classCountersSize = classCounters.size(); - stream.write((char *)&classCountersSize, sizeof(classCountersSize)); - for (std::vector::const_iterator itr = classCounters.begin(); itr != classCounters.end(); ++itr) { - stream.write((char *)&*itr, sizeof(*itr)); - } - */ - - std::vector::size_type posteriorsSize = posteriors.size(); - stream.write((char *)&posteriorsSize, sizeof(posteriorsSize)); - for (std::vector::const_iterator itr = posteriors.begin(); itr != posteriors.end(); ++itr) { - stream.write((char *)&*itr, sizeof(*itr)); - } + stream.write((char*)&verbose, sizeof(verbose)); + stream.write((char*)&nstructs, sizeof(nstructs)); + stream.write((char*)&structSize, sizeof(structSize)); + stream.write((char*)&nclasses, sizeof(nclasses)); + stream.write((char*)&signatureSize, sizeof(signatureSize)); + stream.write((char*)&compressionMethod, sizeof(compressionMethod)); + stream.write((char*)&leavesPerStruct, sizeof(leavesPerStruct)); + stream.write((char*)&patchSize.width, sizeof(patchSize.width)); + stream.write((char*)&patchSize.height, sizeof(patchSize.height)); + + std::vector::size_type featuresSize = features.size(); + stream.write((char*)&featuresSize, sizeof(featuresSize)); + unsigned int featuresValue; + for (std::vector::const_iterator itr = features.begin(); + itr != features.end(); ++itr) + { + featuresValue = itr->y1 * patchSize.width + itr->x1; + stream.write((char*)&featuresValue, sizeof(featuresValue)); + featuresValue = itr->y2 * patchSize.width + itr->x2; + stream.write((char*)&featuresValue, sizeof(featuresValue)); + } + + // don't write classCounters + /* + std::vector::size_type classCountersSize = classCounters.size(); + stream.write((char *)&classCountersSize, sizeof(classCountersSize)); + for (std::vector::const_iterator itr = classCounters.begin(); itr != + classCounters.end(); ++itr) { stream.write((char *)&*itr, sizeof(*itr)); + } + */ + + std::vector::size_type posteriorsSize = posteriors.size(); + stream.write((char*)&posteriorsSize, sizeof(posteriorsSize)); + for (std::vector::const_iterator itr = posteriors.begin(); + itr != posteriors.end(); ++itr) + { + stream.write((char*)&*itr, sizeof(*itr)); + } } FernImageDetector::FernImageDetector(const bool visualize) - : mPatchGenerator(0, 256, 13, true, /*0.25*/0.10, 1.0/*0.6, 1.5*/, -CV_PI*1.0, CV_PI*1.0, -CV_PI*0.0, CV_PI*0.0/*-2*CV_PI, 2*CV_PI*/) // TODO: check angle values, cant be -2pi..2pi ? 
- , mLDetector(3, 20, PYR_LEVELS, N_VIEWS, PATCH_SIZE, 2) - , mClassifier() - , mKeyPoints() - , mImagePoints() - , mModelPoints() - , mVisualize(visualize) - , mObjects() - , mSize() - , mCorrespondences() - , mHomography() - , mInlierRatio(0) + : mPatchGenerator(0, 256, 13, true, /*0.25*/ 0.10, 1.0 /*0.6, 1.5*/, + -CV_PI * 1.0, CV_PI * 1.0, -CV_PI * 0.0, + CV_PI * 0.0 /*-2*CV_PI, 2*CV_PI*/) // TODO: check angle + // values, cant be + // -2pi..2pi ? + , mLDetector(3, 20, PYR_LEVELS, N_VIEWS, PATCH_SIZE, 2) + , mClassifier() + , mKeyPoints() + , mImagePoints() + , mModelPoints() + , mVisualize(visualize) + , mObjects() + , mSize() + , mCorrespondences() + , mHomography() + , mInlierRatio(0) { - //mHomography.eye(3, 3, CV_64F); - mClassifier.resize(1); + // mHomography.eye(3, 3, CV_64F); + mClassifier.resize(1); } FernImageDetector::~FernImageDetector() { } -void FernImageDetector::imagePoints(vector &points) +void FernImageDetector::imagePoints(vector& points) { - points.clear(); - for(size_t i = 0; i < mImagePoints.size(); ++i) { - points.push_back(cvPoint2D64f(mImagePoints[i].x, mImagePoints[i].y)); - } + points.clear(); + for (size_t i = 0; i < mImagePoints.size(); ++i) + { + points.push_back(cvPoint2D64f(mImagePoints[i].x, mImagePoints[i].y)); + } } -void FernImageDetector::modelPoints(vector &points, bool normalize) +void FernImageDetector::modelPoints(vector& points, + bool normalize) { - points.clear(); - //int minx = 1e10, miny = 1e10; - //int maxx = 0, maxy = 0; - for(size_t i = 0; i < mModelPoints.size(); ++i) { - CvPoint3D64f pt = cvPoint3D64f(mModelPoints[i].x, mModelPoints[i].y, 0.0); - if(normalize) { - //minx = (pt.xmaxx)?pt.x:maxx; - //maxy = (pt.y>maxy)?pt.y:maxy; - pt.x -= mSize.width/2; - pt.y -= mSize.height/2; - pt.x /= mSize.width*0.10; - pt.y /= mSize.width*0.10; - } - points.push_back(pt); - } + points.clear(); + // int minx = 1e10, miny = 1e10; + // int maxx = 0, maxy = 0; + for (size_t i = 0; i < mModelPoints.size(); ++i) + { + CvPoint3D64f pt = cvPoint3D64f(mModelPoints[i].x, mModelPoints[i].y, 0.0); + if (normalize) + { + // minx = (pt.xmaxx)?pt.x:maxx; + // maxy = (pt.y>maxy)?pt.y:maxy; + pt.x -= mSize.width / 2; + pt.y -= mSize.height / 2; + pt.x /= mSize.width * 0.10; + pt.y /= mSize.width * 0.10; + } + points.push_back(pt); + } } cv::Size FernImageDetector::size() { - return mSize; + return mSize; } cv::Mat FernImageDetector::homography() { - return mHomography; + return mHomography; } double FernImageDetector::inlierRatio() { - return mInlierRatio; + return mInlierRatio; } -void FernImageDetector::train(const std::string &filename) +void FernImageDetector::train(const std::string& filename) { - Mat object = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE); - train(object); + Mat object = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE); + train(object); } -void FernImageDetector::train(Mat &object) +void FernImageDetector::train(Mat& object) { - mObjects.push_back(object.clone()); - - Mat blurredObject; - GaussianBlur(mObjects[0], blurredObject, Size(SIZE_BLUR, SIZE_BLUR), 0, 0); - - if(mVisualize) { - cvNamedWindow("Object", 1); - imshow("Object", blurredObject); - cv::waitKey(2000); - } - - //buildPyramid(object, objpyr, mLDetector.nOctaves-1); - //mLDetector(mObjects[0], mKeyPoints, N_PTS_TO_TEACH); // TODO: find robust features, TODO: in pyramids? 
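train() here and the findFeatures() ported below implement the classic single-view pipeline: pick stable keypoints, train a ferns classifier on synthesized patch views, then match against a live frame and fit a RANSAC homography. LDetector, PatchGenerator and FernClassifier were dropped from OpenCV's API after the 2.4 series, so as a point of comparison only, here is the same detect-match-homography flow with ORB, plainly a substitute rather than ALVAR's classifier:

```
#include <opencv2/calib3d.hpp>
#include <opencv2/features2d.hpp>

// Rough modern stand-in for the train/findFeatures pair: train on one
// model view, match a query frame, fit a RANSAC homography (model->query).
cv::Mat matchToModel(const cv::Mat& model, const cv::Mat& query)
{
  auto orb = cv::ORB::create(400);  // on the order of N_PTS_TO_FIND
  std::vector<cv::KeyPoint> kp1, kp2;
  cv::Mat d1, d2;
  orb->detectAndCompute(model, cv::noArray(), kp1, d1);
  orb->detectAndCompute(query, cv::noArray(), kp2, d2);
  if (d1.empty() || d2.empty())
    return cv::Mat();

  cv::BFMatcher matcher(cv::NORM_HAMMING, /*crossCheck=*/true);
  std::vector<cv::DMatch> matches;
  matcher.match(d1, d2, matches);
  if (matches.size() < 8)
    return cv::Mat();  // mirrors the fromPt.size() > 8 guard below

  std::vector<cv::Point2f> from, to;
  for (const auto& m : matches)
  {
    from.push_back(kp1[m.queryIdx].pt);
    to.push_back(kp2[m.trainIdx].pt);
  }
  return cv::findHomography(from, to, cv::RANSAC, 20.0);
}
```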
- mLDetector.getMostStable2D(blurredObject, mKeyPoints, N_PTS_TO_TEACH, mPatchGenerator); - - if(mVisualize) { - for(int i = 0; i < (int)mKeyPoints.size(); ++i) - circle(blurredObject, mKeyPoints[i].pt, int(mKeyPoints[i].size/10), CV_RGB(64,64,64)); - - imshow("Object", blurredObject); - cv::waitKey(2000); - } - - mClassifier[0].trainFromSingleView(blurredObject, - mKeyPoints, - PATCH_SIZE, - SIGNATURE_SIZE, - N_STRUCTS, // TODO: why was (int)mKeyPoints.size(), use here? why not a constant? - STRUCT_SIZE, - N_VIEWS, - FernClassifier::COMPRESSION_NONE, - mPatchGenerator); - - mSize = cv::Size(object.cols, object.rows); + mObjects.push_back(object.clone()); + + Mat blurredObject; + GaussianBlur(mObjects[0], blurredObject, Size(SIZE_BLUR, SIZE_BLUR), 0, 0); + + if (mVisualize) + { + cvNamedWindow("Object", 1); + imshow("Object", blurredObject); + cv::waitKey(2000); + } + + // buildPyramid(object, objpyr, mLDetector.nOctaves-1); + // mLDetector(mObjects[0], mKeyPoints, N_PTS_TO_TEACH); // TODO: find robust + // features, TODO: in pyramids? + mLDetector.getMostStable2D(blurredObject, mKeyPoints, N_PTS_TO_TEACH, + mPatchGenerator); + + if (mVisualize) + { + for (int i = 0; i < (int)mKeyPoints.size(); ++i) + circle(blurredObject, mKeyPoints[i].pt, int(mKeyPoints[i].size / 10), + CV_RGB(64, 64, 64)); + + imshow("Object", blurredObject); + cv::waitKey(2000); + } + + mClassifier[0].trainFromSingleView( + blurredObject, mKeyPoints, PATCH_SIZE, SIGNATURE_SIZE, + N_STRUCTS, // TODO: why was (int)mKeyPoints.size(), use here? why not a + // constant? + STRUCT_SIZE, N_VIEWS, FernClassifier::COMPRESSION_NONE, mPatchGenerator); + + mSize = cv::Size(object.cols, object.rows); } -void FernImageDetector::findFeatures(Mat &object, bool planeAssumption) +void FernImageDetector::findFeatures(Mat& object, bool planeAssumption) { - //cv::flip(object, object, 1); - - vector keypoints; - vector objpyr; - - GaussianBlur(object, object, Size(SIZE_BLUR, SIZE_BLUR), 0, 0); - //buildPyramid(object, objpyr, mLDetector.nOctaves-1); - mLDetector.nOctaves = 1; - mLDetector(object/*objpyr*/, keypoints, N_PTS_TO_FIND); - - int m = mKeyPoints.size(); - int n = keypoints.size(); - vector bestMatches(m, -1); - vector maxLogProb(m, -FLT_MAX); - vector signature; - vector pairs; - - for(size_t i = 0; i < keypoints.size(); ++i) { - Point2f pt = keypoints[i].pt; - //int oct = keypoints[i].octave; std::cout<<"oct "<= 0 && (bestMatches[k] < 0 || signature[k] > maxLogProb[k])) { - maxLogProb[k] = signature[k]; - bestMatches[k] = i; - } - } - - for(int i = 0; i < m; i++ ) - if(bestMatches[i] >= 0) { - pairs.push_back(i); - pairs.push_back(bestMatches[i]); + // cv::flip(object, object, 1); + + vector keypoints; + vector objpyr; + + GaussianBlur(object, object, Size(SIZE_BLUR, SIZE_BLUR), 0, 0); + // buildPyramid(object, objpyr, mLDetector.nOctaves-1); + mLDetector.nOctaves = 1; + mLDetector(object /*objpyr*/, keypoints, N_PTS_TO_FIND); + + int m = mKeyPoints.size(); + int n = keypoints.size(); + vector bestMatches(m, -1); + vector maxLogProb(m, -FLT_MAX); + vector signature; + vector pairs; + + for (size_t i = 0; i < keypoints.size(); ++i) + { + Point2f pt = keypoints[i].pt; + // int oct = keypoints[i].octave; std::cout<<"oct "<= 0 && (bestMatches[k] < 0 || signature[k] > maxLogProb[k])) + { + maxLogProb[k] = signature[k]; + bestMatches[k] = i; + } + } + + for (int i = 0; i < m; i++) + if (bestMatches[i] >= 0) + { + pairs.push_back(i); + pairs.push_back(bestMatches[i]); } - mCorrespondences = Mat(mObjects[0].rows + object.rows, std::max( 
mObjects[0].cols, object.cols), CV_8UC3); - mCorrespondences = Scalar(0.); - Mat part(mCorrespondences, Rect(0, 0, mObjects[0].cols, mObjects[0].rows)); - cvtColor(mObjects[0], part, CV_GRAY2BGR); - part = Mat(mCorrespondences, Rect(0, mObjects[0].rows, object.cols, object.rows)); - cvtColor(object, part, CV_GRAY2BGR); - - for(int i = 0; i < (int)keypoints.size(); ++i) - circle(object, keypoints[i].pt, int(keypoints[i].size/5), CV_RGB(64,64,64)); - - vector fromPt, toPt; - vector mask; - for(int i = 0; i < m; ++i) - if( bestMatches[i] >= 0 ){ - fromPt.push_back(mKeyPoints[i].pt); - toPt.push_back(keypoints[bestMatches[i]].pt); - } - - static double valmin = 1.0; - static double valmax = 0.0; - mModelPoints.clear(); - mImagePoints.clear(); - int n_inliers = 0; - - if(planeAssumption && fromPt.size() > 8) { - cv::Mat H = cv::findHomography(Mat(fromPt), Mat(toPt), mask, RANSAC/*CV_LMEDS*/, 20); - mHomography = H; - //CompareModelAndObservation(); - - for(size_t i = 0, j = 0; i < (int)pairs.size(); i += 2, ++j) { - if(mask[j]) { - cv::Point2f pi(keypoints[pairs[i+1]].pt); - cv::Point2f pw(mKeyPoints[pairs[i]].pt); - mModelPoints.push_back(pw); - mImagePoints.push_back(pi); - line(mCorrespondences, mKeyPoints[pairs[i]].pt, - keypoints[pairs[i+1]].pt + Point2f(0.0,(float)mObjects[0].rows), - Scalar(i*i%244,100-i*100%30,i*i-50*i)); - n_inliers++; - } - } - } else { - for(size_t i = 0, j = 0; i < (int)pairs.size(); i += 2, ++j) { - cv::Point2f pi(keypoints[pairs[i+1]].pt); - cv::Point2f pw(mKeyPoints[pairs[i]].pt); - mModelPoints.push_back(pw); - mImagePoints.push_back(pi); - line(mCorrespondences, mKeyPoints[pairs[i]].pt, - keypoints[pairs[i+1]].pt + Point2f(0.0,(float)mObjects[0].rows), - Scalar(i*i%244,100-i*100%30,i*i-50*i)); - } - } - - - double val = 0.0; - if(fromPt.size()>0) val = 1.*n_inliers/fromPt.size(); - if(val > valmax) valmax = val; - if(val < valmin) valmin = val; - - mInlierRatio = val; - - if (mVisualize) { - cvNamedWindow("Matches", 1); - imshow("Matches", mCorrespondences); - cv::waitKey(1); + mCorrespondences = Mat(mObjects[0].rows + object.rows, + std::max(mObjects[0].cols, object.cols), CV_8UC3); + mCorrespondences = Scalar(0.); + Mat part(mCorrespondences, Rect(0, 0, mObjects[0].cols, mObjects[0].rows)); + cvtColor(mObjects[0], part, CV_GRAY2BGR); + part = Mat(mCorrespondences, + Rect(0, mObjects[0].rows, object.cols, object.rows)); + cvtColor(object, part, CV_GRAY2BGR); + + for (int i = 0; i < (int)keypoints.size(); ++i) + circle(object, keypoints[i].pt, int(keypoints[i].size / 5), + CV_RGB(64, 64, 64)); + + vector fromPt, toPt; + vector mask; + for (int i = 0; i < m; ++i) + if (bestMatches[i] >= 0) + { + fromPt.push_back(mKeyPoints[i].pt); + toPt.push_back(keypoints[bestMatches[i]].pt); + } + + static double valmin = 1.0; + static double valmax = 0.0; + mModelPoints.clear(); + mImagePoints.clear(); + int n_inliers = 0; + + if (planeAssumption && fromPt.size() > 8) + { + cv::Mat H = cv::findHomography(Mat(fromPt), Mat(toPt), mask, + RANSAC /*CV_LMEDS*/, 20); + mHomography = H; + // CompareModelAndObservation(); + + for (size_t i = 0, j = 0; i < (int)pairs.size(); i += 2, ++j) + { + if (mask[j]) + { + cv::Point2f pi(keypoints[pairs[i + 1]].pt); + cv::Point2f pw(mKeyPoints[pairs[i]].pt); + mModelPoints.push_back(pw); + mImagePoints.push_back(pi); + line(mCorrespondences, mKeyPoints[pairs[i]].pt, + keypoints[pairs[i + 1]].pt + Point2f(0.0, (float)mObjects[0].rows), + Scalar(i * i % 244, 100 - i * 100 % 30, i * i - 50 * i)); + n_inliers++; + } + } + } + else + { + for (size_t 
i = 0, j = 0; i < (int)pairs.size(); i += 2, ++j) + { + cv::Point2f pi(keypoints[pairs[i + 1]].pt); + cv::Point2f pw(mKeyPoints[pairs[i]].pt); + mModelPoints.push_back(pw); + mImagePoints.push_back(pi); + line(mCorrespondences, mKeyPoints[pairs[i]].pt, + keypoints[pairs[i + 1]].pt + Point2f(0.0, (float)mObjects[0].rows), + Scalar(i * i % 244, 100 - i * 100 % 30, i * i - 50 * i)); } + } + + double val = 0.0; + if (fromPt.size() > 0) + val = 1. * n_inliers / fromPt.size(); + if (val > valmax) + valmax = val; + if (val < valmin) + valmin = val; + + mInlierRatio = val; + + if (mVisualize) + { + cvNamedWindow("Matches", 1); + imshow("Matches", mCorrespondences); + cv::waitKey(1); + } } -bool FernImageDetector::read(const std::string &filename, const bool binary) +bool FernImageDetector::read(const std::string& filename, const bool binary) { - if (binary) { - std::fstream bs(filename.c_str(), std::fstream::in | std::fstream::binary); - - if (!bs.is_open()) { - return false; - } - - bs.read((char *)&mLDetector.radius, sizeof(mLDetector.radius)); - bs.read((char *)&mLDetector.threshold, sizeof(mLDetector.threshold)); - bs.read((char *)&mLDetector.nOctaves, sizeof(mLDetector.nOctaves)); - bs.read((char *)&mLDetector.nViews, sizeof(mLDetector.nViews)); - bs.read((char *)&mLDetector.verbose, sizeof(mLDetector.verbose)); - bs.read((char *)&mLDetector.baseFeatureSize, sizeof(mLDetector.baseFeatureSize)); - bs.read((char *)&mLDetector.clusteringDistance, sizeof(mLDetector.clusteringDistance)); - - mClassifier[0].readBinary(bs); - - std::vector::size_type size; - bs.read((char *)&size, sizeof(size)); - mKeyPoints.reserve(size); - KeyPoint value; - for (std::vector::size_type i = 0; i < size; ++i) { - bs.read((char *)&value.pt.x, sizeof(value.pt.x)); - bs.read((char *)&value.pt.y, sizeof(value.pt.y)); - bs.read((char *)&value.size, sizeof(value.size)); - bs.read((char *)&value.angle, sizeof(value.angle)); - bs.read((char *)&value.response, sizeof(value.response)); - bs.read((char *)&value.octave, sizeof(value.octave)); - bs.read((char *)&value.class_id, sizeof(value.class_id)); - mKeyPoints.push_back(value); - } - - bs.read((char *)&mSize.width, sizeof(mSize.width)); - bs.read((char *)&mSize.height, sizeof(mSize.height)); - - std::vector::size_type objectsSize; - bs.read((char *)&objectsSize, sizeof(objectsSize)); - mObjects.reserve(objectsSize); - int rows; - int cols; - int type; - for (std::vector::size_type i = 0; i < objectsSize; ++i) { - bs.read((char *)&rows, sizeof(rows)); - bs.read((char *)&cols, sizeof(cols)); - bs.read((char *)&type, sizeof(type)); - Mat objectsValue(rows, cols, type); - bs.read((char *)objectsValue.data, objectsValue.elemSize() * objectsValue.total()); - mObjects.push_back(objectsValue); - } - - bs.close(); + if (binary) + { + std::fstream bs(filename.c_str(), std::fstream::in | std::fstream::binary); + + if (!bs.is_open()) + { + return false; + } + + bs.read((char*)&mLDetector.radius, sizeof(mLDetector.radius)); + bs.read((char*)&mLDetector.threshold, sizeof(mLDetector.threshold)); + bs.read((char*)&mLDetector.nOctaves, sizeof(mLDetector.nOctaves)); + bs.read((char*)&mLDetector.nViews, sizeof(mLDetector.nViews)); + bs.read((char*)&mLDetector.verbose, sizeof(mLDetector.verbose)); + bs.read((char*)&mLDetector.baseFeatureSize, + sizeof(mLDetector.baseFeatureSize)); + bs.read((char*)&mLDetector.clusteringDistance, + sizeof(mLDetector.clusteringDistance)); + + mClassifier[0].readBinary(bs); + + std::vector::size_type size; + bs.read((char*)&size, sizeof(size)); + 
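A portability caveat on the binary format being read and written here: the length fields are streamed as raw `std::vector<...>::size_type` values, so the on-disk layout depends on the platform's `size_t` width and endianness; a model file written on a 32-bit machine will not load on a 64-bit one. A fixed-width variant of the same pattern (hypothetical helpers, trivially copyable element types assumed):

```
#include <cstdint>
#include <fstream>
#include <type_traits>
#include <vector>

// Length-prefixed binary vector I/O with a fixed 8-byte length field,
// so the file layout no longer depends on the platform's size_t width.
template <typename T>
void writeVec(std::fstream& s, const std::vector<T>& v)
{
  static_assert(std::is_trivially_copyable<T>::value, "raw bytes only");
  const std::uint64_t n = v.size();  // always 8 bytes on disk
  s.write(reinterpret_cast<const char*>(&n), sizeof(n));
  s.write(reinterpret_cast<const char*>(v.data()), n * sizeof(T));
}

template <typename T>
void readVec(std::fstream& s, std::vector<T>& v)
{
  static_assert(std::is_trivially_copyable<T>::value, "raw bytes only");
  std::uint64_t n = 0;
  s.read(reinterpret_cast<char*>(&n), sizeof(n));
  v.resize(n);
  s.read(reinterpret_cast<char*>(v.data()), n * sizeof(T));
}
```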
mKeyPoints.reserve(size); + KeyPoint value; + for (std::vector::size_type i = 0; i < size; ++i) + { + bs.read((char*)&value.pt.x, sizeof(value.pt.x)); + bs.read((char*)&value.pt.y, sizeof(value.pt.y)); + bs.read((char*)&value.size, sizeof(value.size)); + bs.read((char*)&value.angle, sizeof(value.angle)); + bs.read((char*)&value.response, sizeof(value.response)); + bs.read((char*)&value.octave, sizeof(value.octave)); + bs.read((char*)&value.class_id, sizeof(value.class_id)); + mKeyPoints.push_back(value); } - else { - FileStorage fs(filename, FileStorage::READ); - - if (!fs.isOpened()) { - return false; - } - - FileNode node = fs.getFirstTopLevelNode(); - std::cout << "loaded file" << std::endl; - cv::read(node["model_points"], mKeyPoints); - std::cout << "loaded model points" << std::endl; - mClassifier[0].read(node["fern_classifier"]); - std::cout << "loaded classifier" << std::endl; + + bs.read((char*)&mSize.width, sizeof(mSize.width)); + bs.read((char*)&mSize.height, sizeof(mSize.height)); + + std::vector::size_type objectsSize; + bs.read((char*)&objectsSize, sizeof(objectsSize)); + mObjects.reserve(objectsSize); + int rows; + int cols; + int type; + for (std::vector::size_type i = 0; i < objectsSize; ++i) + { + bs.read((char*)&rows, sizeof(rows)); + bs.read((char*)&cols, sizeof(cols)); + bs.read((char*)&type, sizeof(type)); + Mat objectsValue(rows, cols, type); + bs.read((char*)objectsValue.data, + objectsValue.elemSize() * objectsValue.total()); + mObjects.push_back(objectsValue); + } + + bs.close(); + } + else + { + FileStorage fs(filename, FileStorage::READ); + + if (!fs.isOpened()) + { + return false; } - return true; + FileNode node = fs.getFirstTopLevelNode(); + std::cout << "loaded file" << std::endl; + cv::read(node["model_points"], mKeyPoints); + std::cout << "loaded model points" << std::endl; + mClassifier[0].read(node["fern_classifier"]); + std::cout << "loaded classifier" << std::endl; + } + + return true; } -bool FernImageDetector::write(const std::string &filename, const bool binary) +bool FernImageDetector::write(const std::string& filename, const bool binary) { - if (binary) { - std::fstream bs(filename.c_str(), std::fstream::out | std::fstream::binary); - - if (!bs.is_open()) { - return false; - } - - bs.write((char *)&mLDetector.radius, sizeof(mLDetector.radius)); - bs.write((char *)&mLDetector.threshold, sizeof(mLDetector.threshold)); - bs.write((char *)&mLDetector.nOctaves, sizeof(mLDetector.nOctaves)); - bs.write((char *)&mLDetector.nViews, sizeof(mLDetector.nViews)); - bs.write((char *)&mLDetector.verbose, sizeof(mLDetector.verbose)); - bs.write((char *)&mLDetector.baseFeatureSize, sizeof(mLDetector.baseFeatureSize)); - bs.write((char *)&mLDetector.clusteringDistance, sizeof(mLDetector.clusteringDistance)); - - mClassifier[0].writeBinary(bs); - - std::vector::size_type size = mKeyPoints.size(); - bs.write((char *)&size, sizeof(size)); - for (std::vector::const_iterator itr = mKeyPoints.begin(); itr != mKeyPoints.end(); ++itr) { - bs.write((char *)&itr->pt.x, sizeof(itr->pt.x)); - bs.write((char *)&itr->pt.y, sizeof(itr->pt.y)); - bs.write((char *)&itr->size, sizeof(itr->size)); - bs.write((char *)&itr->angle, sizeof(itr->angle)); - bs.write((char *)&itr->response, sizeof(itr->response)); - bs.write((char *)&itr->octave, sizeof(itr->octave)); - bs.write((char *)&itr->class_id, sizeof(itr->class_id)); - } - - bs.write((char *)&mSize.width, sizeof(mSize.width)); - bs.write((char *)&mSize.height, sizeof(mSize.height)); - - std::vector::size_type objectsSize = 
mObjects.size(); - bs.write((char *)&objectsSize, sizeof(objectsSize)); - for (std::vector::const_iterator itr = mObjects.begin(); itr != mObjects.end(); ++itr) { - bs.write((char *)&itr->rows, sizeof(itr->rows)); - bs.write((char *)&itr->cols, sizeof(itr->cols)); - int type = itr->type(); - bs.write((char *)&type, sizeof(type)); - bs.write((char *)itr->data, itr->elemSize() * itr->total()); - } - - bs.close(); + if (binary) + { + std::fstream bs(filename.c_str(), std::fstream::out | std::fstream::binary); + + if (!bs.is_open()) + { + return false; } - else { - FileStorage fs(filename, FileStorage::WRITE); - if (!fs.isOpened()) { - return false; - } + bs.write((char*)&mLDetector.radius, sizeof(mLDetector.radius)); + bs.write((char*)&mLDetector.threshold, sizeof(mLDetector.threshold)); + bs.write((char*)&mLDetector.nOctaves, sizeof(mLDetector.nOctaves)); + bs.write((char*)&mLDetector.nViews, sizeof(mLDetector.nViews)); + bs.write((char*)&mLDetector.verbose, sizeof(mLDetector.verbose)); + bs.write((char*)&mLDetector.baseFeatureSize, + sizeof(mLDetector.baseFeatureSize)); + bs.write((char*)&mLDetector.clusteringDistance, + sizeof(mLDetector.clusteringDistance)); + + mClassifier[0].writeBinary(bs); + + std::vector::size_type size = mKeyPoints.size(); + bs.write((char*)&size, sizeof(size)); + for (std::vector::const_iterator itr = mKeyPoints.begin(); + itr != mKeyPoints.end(); ++itr) + { + bs.write((char*)&itr->pt.x, sizeof(itr->pt.x)); + bs.write((char*)&itr->pt.y, sizeof(itr->pt.y)); + bs.write((char*)&itr->size, sizeof(itr->size)); + bs.write((char*)&itr->angle, sizeof(itr->angle)); + bs.write((char*)&itr->response, sizeof(itr->response)); + bs.write((char*)&itr->octave, sizeof(itr->octave)); + bs.write((char*)&itr->class_id, sizeof(itr->class_id)); + } - WriteStructContext ws(fs, "fern_image_detector", CV_NODE_MAP); - cv::write(fs, "model_points", mKeyPoints); - mClassifier[0].write(fs, "fern_classifier"); + bs.write((char*)&mSize.width, sizeof(mSize.width)); + bs.write((char*)&mSize.height, sizeof(mSize.height)); + + std::vector::size_type objectsSize = mObjects.size(); + bs.write((char*)&objectsSize, sizeof(objectsSize)); + for (std::vector::const_iterator itr = mObjects.begin(); + itr != mObjects.end(); ++itr) + { + bs.write((char*)&itr->rows, sizeof(itr->rows)); + bs.write((char*)&itr->cols, sizeof(itr->cols)); + int type = itr->type(); + bs.write((char*)&type, sizeof(type)); + bs.write((char*)itr->data, itr->elemSize() * itr->total()); } - return true; + bs.close(); + } + else + { + FileStorage fs(filename, FileStorage::WRITE); + + if (!fs.isOpened()) + { + return false; + } + + WriteStructContext ws(fs, "fern_image_detector", CV_NODE_MAP); + cv::write(fs, "model_points", mKeyPoints); + mClassifier[0].write(fs, "fern_classifier"); + } + + return true; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/FernPoseEstimator.cpp b/ar_track_alvar/src/FernPoseEstimator.cpp index d50497e..3485480 100644 --- a/ar_track_alvar/src/FernPoseEstimator.cpp +++ b/ar_track_alvar/src/FernPoseEstimator.cpp @@ -23,12 +23,9 @@ #include "FernPoseEstimator.h" -namespace alvar { - -FernPoseEstimator::FernPoseEstimator() - : mPose() - , mCamera() - , mCameraEC() +namespace alvar +{ +FernPoseEstimator::FernPoseEstimator() : mPose(), mCamera(), mCameraEC() { } @@ -38,46 +35,53 @@ FernPoseEstimator::~FernPoseEstimator() Pose FernPoseEstimator::pose() const { - return mPose; + return mPose; } Camera FernPoseEstimator::camera() const { - return mCamera; + return mCamera; } -bool 
FernPoseEstimator::setCalibration(const std::string &filename, int width, int height) +bool FernPoseEstimator::setCalibration(const std::string& filename, int width, + int height) { - bool r1 = mCamera.SetCalib(filename.c_str(), width, height); - bool r2 = mCameraEC.SetCalib(filename.c_str(), width, height); - return r1 && r2; + bool r1 = mCamera.SetCalib(filename.c_str(), width, height); + bool r2 = mCameraEC.SetCalib(filename.c_str(), width, height); + return r1 && r2; } void FernPoseEstimator::setResolution(int width, int height) { - mCamera.SetRes(width, height); - mCameraEC.SetRes(width, height); + mCamera.SetRes(width, height); + mCameraEC.SetRes(width, height); } -void FernPoseEstimator::calculateFromPointCorrespondences(FernPoseEstimator::ModelPointVector &mpts, FernPoseEstimator::ImagePointVector &ipts) +void FernPoseEstimator::calculateFromPointCorrespondences( + FernPoseEstimator::ModelPointVector& mpts, + FernPoseEstimator::ImagePointVector& ipts) { - mCamera.CalcExteriorOrientation(mpts, ipts, &mPose); // TODO replace camera->cameraec + mCamera.CalcExteriorOrientation(mpts, ipts, + &mPose); // TODO replace camera->cameraec } -void FernPoseEstimator::updateFromTrackedPoints(FernPoseEstimator::ExternalContainerMap &container) +void FernPoseEstimator::updateFromTrackedPoints( + FernPoseEstimator::ExternalContainerMap& container) { - mCameraEC.UpdatePose(container, &mPose); + mCameraEC.UpdatePose(container, &mPose); } -void FernPoseEstimator::extractPlaneCoordinates(FernPoseEstimator::ExternalContainerMap &container) +void FernPoseEstimator::extractPlaneCoordinates( + FernPoseEstimator::ExternalContainerMap& container) { - ExternalContainerMap::iterator iter = container.begin(); - ExternalContainerMap::iterator iter_end = container.end(); - for(; iter != iter_end; ++iter) { - alvar::ExternalContainer &f = iter->second; - mCameraEC.Get3dOnPlane(&mPose, f.p2d, f.p3d); - f.has_p3d = true; - } + ExternalContainerMap::iterator iter = container.begin(); + ExternalContainerMap::iterator iter_end = container.end(); + for (; iter != iter_end; ++iter) + { + alvar::ExternalContainer& f = iter->second; + mCameraEC.Get3dOnPlane(&mPose, f.p2d, f.p3d); + f.has_p3d = true; + } } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/FileFormatUtils.cpp b/ar_track_alvar/src/FileFormatUtils.cpp index 5444546..54b6499 100644 --- a/ar_track_alvar/src/FileFormatUtils.cpp +++ b/ar_track_alvar/src/FileFormatUtils.cpp @@ -26,83 +26,111 @@ #include #include -namespace alvar { - - bool FileFormatUtils::decodeXMLMatrix(const TiXmlElement *xml_matrix, int &type, int &rows, int &cols) { - const char * xml_type = xml_matrix->Attribute("type"); - if (strcmp("CV_32F", xml_type) == 0) type = CV_32F; - else if (strcmp("CV_64F", xml_type) == 0) type = CV_64F; - else return false; - - if (xml_matrix->QueryIntAttribute("rows", &rows) != TIXML_SUCCESS) return false; - if (xml_matrix->QueryIntAttribute("cols", &cols) != TIXML_SUCCESS) return false; - - return true; - } - - CvMat* FileFormatUtils::allocateXMLMatrix(const TiXmlElement *xml_matrix) { - if (!xml_matrix) return NULL; - - int type, rows, cols; - if (!decodeXMLMatrix(xml_matrix, type, rows, cols)) return NULL; - - return cvCreateMat(rows, cols, type); - } - - bool FileFormatUtils::parseXMLMatrix(const TiXmlElement *xml_matrix, CvMat *matrix) { - if (!xml_matrix || !matrix) return false; - - int type, rows, cols; - if (!decodeXMLMatrix(xml_matrix, type, rows, cols)) return false; - - if (type != cvGetElemType(matrix)) return false; - if 
(rows != matrix->rows) return false; - if (cols != matrix->cols) return false; - - const TiXmlElement *xml_data = xml_matrix->FirstChildElement("data"); - for (int r = 0; r < matrix->rows; ++r) { - for (int c = 0; c < matrix->cols; ++c) { - if (!xml_data) return false; - double value = atof(xml_data->GetText()); - cvSetReal2D(matrix, r, c, value); - xml_data = (const TiXmlElement *) xml_data->NextSibling("data"); - } - } - - return true; - } - - TiXmlElement* FileFormatUtils::createXMLMatrix(const char* element_name, const CvMat *matrix) { - if (!matrix) return NULL; - - TiXmlElement* xml_matrix = new TiXmlElement(element_name); - int precision; - if (cvGetElemType(matrix) == CV_32F) { - xml_matrix->SetAttribute("type", "CV_32F"); - precision = std::numeric_limits::digits10 + 2; - } - else if (cvGetElemType(matrix) == CV_64F) { - xml_matrix->SetAttribute("type", "CV_64F"); - precision = std::numeric_limits::digits10 + 2; - } - else { - delete xml_matrix; - return NULL; - } - - xml_matrix->SetAttribute("rows", matrix->rows); - xml_matrix->SetAttribute("cols", matrix->cols); - - for (int r = 0; r < matrix->rows; ++r) { - for (int c = 0; c < matrix->cols; ++c) { - TiXmlElement *xml_data = new TiXmlElement("data"); - xml_matrix->LinkEndChild(xml_data); - std::stringstream ss; - ss.precision(precision); - ss<LinkEndChild(new TiXmlText(ss.str().c_str())); - } - } - return xml_matrix; - } +namespace alvar +{ +bool FileFormatUtils::decodeXMLMatrix(const TiXmlElement* xml_matrix, int& type, + int& rows, int& cols) +{ + const char* xml_type = xml_matrix->Attribute("type"); + if (strcmp("CV_32F", xml_type) == 0) + type = CV_32F; + else if (strcmp("CV_64F", xml_type) == 0) + type = CV_64F; + else + return false; + + if (xml_matrix->QueryIntAttribute("rows", &rows) != TIXML_SUCCESS) + return false; + if (xml_matrix->QueryIntAttribute("cols", &cols) != TIXML_SUCCESS) + return false; + + return true; } + +cv::Mat* FileFormatUtils::allocateXMLMatrix(const TiXmlElement* xml_matrix) +{ + if (!xml_matrix) + return NULL; + + int type, rows, cols; + if (!decodeXMLMatrix(xml_matrix, type, rows, cols)) + return NULL; + + return new cv::Mat(rows, cols, type); +} + +bool FileFormatUtils::parseXMLMatrix(const TiXmlElement* xml_matrix, + cv::Mat& matrix) +{ + if (!xml_matrix || matrix.empty()) + return false; + + int type, rows, cols; + if (!decodeXMLMatrix(xml_matrix, type, rows, cols)) + return false; + + if (type != matrix.type()) + return false; + if (rows != matrix.rows) + return false; + if (cols != matrix.cols) + return false; + + const TiXmlElement* xml_data = xml_matrix->FirstChildElement("data"); + for (int r = 0; r < matrix.rows; ++r) + { + for (int c = 0; c < matrix.cols; ++c) + { + if (!xml_data) + return false; + double value = atof(xml_data->GetText()); + matrix.at(r, c) = value; + xml_data = (const TiXmlElement*)xml_data->NextSibling("data"); + } + } + + return true; +} + +TiXmlElement* FileFormatUtils::createXMLMatrix(const char* element_name, + const cv::Mat& matrix) +{ + if (matrix.empty()) + return NULL; + + TiXmlElement* xml_matrix = new TiXmlElement(element_name); + int precision; + if (matrix.type() == CV_32F) + { + xml_matrix->SetAttribute("type", "CV_32F"); + precision = std::numeric_limits::digits10 + 2; + } + else if (matrix.type() == CV_64F) + { + xml_matrix->SetAttribute("type", "CV_64F"); + precision = std::numeric_limits::digits10 + 2; + } + else + { + delete xml_matrix; + return NULL; + } + + xml_matrix->SetAttribute("rows", matrix.rows); + xml_matrix->SetAttribute("cols", 
matrix.cols); + + for (int r = 0; r < matrix.rows; ++r) + { + for (int c = 0; c < matrix.cols; ++c) + { + TiXmlElement* xml_data = new TiXmlElement("data"); + xml_matrix->LinkEndChild(xml_data); + std::stringstream ss; + ss.precision(precision); + ss << matrix.at(r, c); + xml_data->LinkEndChild(new TiXmlText(ss.str().c_str())); + } + } + return xml_matrix; +} +} // namespace alvar diff --git a/ar_track_alvar/src/Filter.cpp b/ar_track_alvar/src/Filter.cpp index 7cd80b0..d6875c2 100644 --- a/ar_track_alvar/src/Filter.cpp +++ b/ar_track_alvar/src/Filter.cpp @@ -27,87 +27,116 @@ template class ALVAR_EXPORT alvar::FilterArray; template class ALVAR_EXPORT alvar::FilterArray; template class ALVAR_EXPORT alvar::FilterArray; -template class ALVAR_EXPORT alvar::FilterArray; +template class ALVAR_EXPORT + alvar::FilterArray; using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -Filter::Filter() { - value=0; +Filter::Filter() +{ + value = 0; } -void FilterAverage::push_to_buffer(double y) { - buffer.push_back(y); - while (buffer.size() > window_size) { - buffer.pop_front(); - } +void FilterAverage::push_to_buffer(double y) +{ + buffer.push_back(y); + while (buffer.size() > window_size) + { + buffer.pop_front(); + } } -double FilterAverage::next(double y) { - if (window_size <= 0) { - count++; - double alpha = 1.0/count; - return (value=((1.0-alpha)*value)+(alpha*y)); - } else { - push_to_buffer(y); - double sum = 0; - for (deque::iterator iter = buffer.begin(); iter != buffer.end(); iter++) { - sum += (double)*iter; - } - return (value=sum/buffer.size()); - } +double FilterAverage::next(double y) +{ + if (window_size <= 0) + { + count++; + double alpha = 1.0 / count; + return (value = ((1.0 - alpha) * value) + (alpha * y)); + } + else + { + push_to_buffer(y); + double sum = 0; + for (deque::iterator iter = buffer.begin(); iter != buffer.end(); + iter++) + { + sum += (double)*iter; + } + return (value = sum / buffer.size()); + } } -void FilterAverage::reset() { - buffer.clear(); +void FilterAverage::reset() +{ + buffer.clear(); } -double FilterAverage::deviation() const { - double sum = 0; - if (buffer.size() == 0) return 0; - for (deque::const_iterator iter = buffer.begin(); iter != buffer.end(); iter++) { - double val = ((double)*iter)-value; - sum += (val*val); - } - sum /= buffer.size(); - return sqrt(sum); +double FilterAverage::deviation() const +{ + double sum = 0; + if (buffer.size() == 0) + return 0; + for (deque::const_iterator iter = buffer.begin(); + iter != buffer.end(); iter++) + { + double val = ((double)*iter) - value; + sum += (val * val); + } + sum /= buffer.size(); + return sqrt(sum); } -double FilterMedian::next(double y) { - if (window_size <= 1) return y; - push_to_buffer(y); - copy(buffer.begin(), buffer.end(), sort_buffer.begin()); - int nth = buffer.size()/2; - nth_element(sort_buffer.begin(), sort_buffer.begin() + nth, sort_buffer.begin()+buffer.size()); - return value = sort_buffer[nth]; +double FilterMedian::next(double y) +{ + if (window_size <= 1) + return y; + push_to_buffer(y); + copy(buffer.begin(), buffer.end(), sort_buffer.begin()); + int nth = buffer.size() / 2; + nth_element(sort_buffer.begin(), sort_buffer.begin() + nth, + sort_buffer.begin() + buffer.size()); + return value = sort_buffer[nth]; } -double FilterRunningAverage::next(double y) { - if (breset) { - breset=false; - value=(double)y; - } else { - value = ((1.0-alpha) * value) + (alpha * (double)y); - } - return value; +double FilterRunningAverage::next(double y) +{ + if (breset) 
+ { + breset = false; + value = (double)y; + } + else + { + value = ((1.0 - alpha) * value) + (alpha * (double)y); + } + return value; } -void FilterRunningAverage::reset() { breset=true; } +void FilterRunningAverage::reset() +{ + breset = true; +} -double FilterDoubleExponentialSmoothing::next(double y) { - if (breset) { - breset=false; - value=(double)y; - slope=0.0; - } - else { - double value_prev = value; - value = ((1.0-alpha) * (value + slope)) + (alpha * (double)y); - slope = ((1.0-gamma) * (slope)) + (gamma * (value - value_prev)); - } - return value; +double FilterDoubleExponentialSmoothing::next(double y) +{ + if (breset) + { + breset = false; + value = (double)y; + slope = 0.0; + } + else + { + double value_prev = value; + value = ((1.0 - alpha) * (value + slope)) + (alpha * (double)y); + slope = ((1.0 - gamma) * (slope)) + (gamma * (value - value_prev)); + } + return value; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/GlutViewer.cpp b/ar_track_alvar/src/GlutViewer.cpp index e361bcc..f255cdb 100644 --- a/ar_track_alvar/src/GlutViewer.cpp +++ b/ar_track_alvar/src/GlutViewer.cpp @@ -7,136 +7,142 @@ using namespace std; using namespace alvar; using namespace GlutViewer; -Drawable::Drawable(double _scale, double _r, double _g, double _b) { - SetScale(_scale); - SetColor(_r, _g, _b); +Drawable::Drawable(double _scale, double _r, double _g, double _b) +{ + SetScale(_scale); + SetColor(_r, _g, _b); } -void Drawable::SetScale(double _scale) { - scale=_scale; +void Drawable::SetScale(double _scale) +{ + scale = _scale; } -void Drawable::SetColor(double _r, double _g, double _b) { - color[0] = _r; - color[1] = _g; - color[2] = _b; +void Drawable::SetColor(double _r, double _g, double _b) +{ + color[0] = _r; + color[1] = _g; + color[2] = _b; } -void Drawable::Draw() { - //double color[3] = {1, 1, 1}; - glPushMatrix(); - glMultMatrixd(gl_mat); - DrawAxis(scale, color); - glPopMatrix(); +void Drawable::Draw() +{ + // double color[3] = {1, 1, 1}; + glPushMatrix(); + glMultMatrixd(gl_mat); + DrawAxis(scale, color); + glPopMatrix(); } void Drawable::DrawAxis(double scale, double color[3]) { - glEnable(GL_DEPTH_TEST); - glDisable(GL_CULL_FACE); - - glColor3d(color[0], color[1], color[2]); - glBegin(GL_QUADS); - glVertex3f(-scale/2, -scale/2, 0.0); - glVertex3f(-scale/2, scale/2, 0.0); - - glVertex3f(-scale/2, scale/2, 0.0); - glVertex3f( scale/2, scale/2, 0.0); - - glVertex3f( scale/2, scale/2, 0.0); - glVertex3f( scale/2, -scale/2, 0.0); - - glVertex3f( scale/2, -scale/2, 0.0); - glVertex3f(-scale/2, -scale/2, 0.0); - glEnd(); - - // Z - glColor3d(0.0, 0.0, 1.0); - glBegin(GL_LINES); - glVertex3f(0.0, 0.0, 0.0); - glVertex3f(0.0, 0.0, scale); - glEnd(); - - glDisable(GL_DEPTH_TEST); - - // X - glColor3d(1.0, 0.0, 0.0); - glBegin(GL_LINES); - glVertex3f(0.0, 0.0, 0.0); - glVertex3f(scale, 0.0, 0.0); - glEnd(); - - // Y - glColor3d(0.0, 1.0, 0.0); - glBegin(GL_LINES); - glVertex3f(0.0, 0.0, 0.0); - glVertex3f(0.0, scale, 0.0); - glEnd(); + glEnable(GL_DEPTH_TEST); + glDisable(GL_CULL_FACE); + + glColor3d(color[0], color[1], color[2]); + glBegin(GL_QUADS); + glVertex3f(-scale / 2, -scale / 2, 0.0); + glVertex3f(-scale / 2, scale / 2, 0.0); + + glVertex3f(-scale / 2, scale / 2, 0.0); + glVertex3f(scale / 2, scale / 2, 0.0); + + glVertex3f(scale / 2, scale / 2, 0.0); + glVertex3f(scale / 2, -scale / 2, 0.0); + + glVertex3f(scale / 2, -scale / 2, 0.0); + glVertex3f(-scale / 2, -scale / 2, 0.0); + glEnd(); + + // Z + glColor3d(0.0, 0.0, 1.0); + glBegin(GL_LINES); + 
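The filters reformatted in the Filter.cpp hunk above all reduce to two recurrences: a windowed (or, for `window_size <= 0`, cumulative) mean, and the exponential smoother `value' = (1 - alpha) * value + alpha * y`. A minimal standalone sketch of both recurrences, with illustrative struct names rather than the package's classes:

```
#include <deque>
#include <iostream>
#include <numeric>

// Sliding-window mean, like FilterAverage with window_size > 0.
struct WindowedMean
{
  std::deque<double> buffer;
  size_t window_size = 5;
  double next(double y)
  {
    buffer.push_back(y);
    while (buffer.size() > window_size)
      buffer.pop_front();
    return std::accumulate(buffer.begin(), buffer.end(), 0.0) / buffer.size();
  }
};

// Exponential smoother, like FilterRunningAverage:
// value' = (1 - alpha) * value + alpha * y.
struct ExpSmoother
{
  double alpha = 0.5;
  double value = 0.0;
  bool breset = true;
  double next(double y)
  {
    if (breset)
    {
      breset = false;
      value = y;  // first sample seeds the state
    }
    else
    {
      value = (1.0 - alpha) * value + alpha * y;
    }
    return value;
  }
};

int main()
{
  WindowedMean wm;
  ExpSmoother es;
  for (double y : { 1.0, 2.0, 4.0, 8.0 })
    std::cout << wm.next(y) << " " << es.next(y) << "\n";
}
```

FilterDoubleExponentialSmoothing is the same idea applied twice, with a second coefficient (`gamma`) smoothing the slope of the signal as well as its level.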
glVertex3f(0.0, 0.0, 0.0); + glVertex3f(0.0, 0.0, scale); + glEnd(); + + glDisable(GL_DEPTH_TEST); + + // X + glColor3d(1.0, 0.0, 0.0); + glBegin(GL_LINES); + glVertex3f(0.0, 0.0, 0.0); + glVertex3f(scale, 0.0, 0.0); + glEnd(); + + // Y + glColor3d(0.0, 1.0, 0.0); + glBegin(GL_LINES); + glVertex3f(0.0, 0.0, 0.0); + glVertex3f(0.0, scale, 0.0); + glEnd(); } -void Drawable::SetGLMatTraQuat(double *tra, double *quat, bool flip) +void Drawable::SetGLMatTraQuat(double* tra, double* quat, bool flip) { - Rotation r; - if (quat != 0) - { - CvMat cv_mat; - cvInitMatHeader(&cv_mat, 4, 1, CV_64F, quat); - r.SetQuaternion(&cv_mat); - } - - int flp = 1; - if (flip) - { - r.Transpose(); - //flp=-1; - } - - CvMat cv_gl_mat; - cvInitMatHeader(&cv_gl_mat, 4, 4, CV_64F, gl_mat); cvZero(&cv_gl_mat); - r.GetMatrix(&cv_gl_mat); - cvSet2D(&cv_gl_mat, 0, 3, cvScalar(flp*tra[0])); - cvSet2D(&cv_gl_mat, 1, 3, cvScalar(flp*tra[1])); - cvSet2D(&cv_gl_mat, 2, 3, cvScalar(flp*tra[2])); - cvSet2D(&cv_gl_mat, 3, 3, cvScalar(1)); - - cvTranspose(&cv_gl_mat, &cv_gl_mat); + Rotation r; + if (quat != 0) + { + CvMat cv_mat; + cvInitMatHeader(&cv_mat, 4, 1, CV_64F, quat); + r.SetQuaternion(&cv_mat); + } + + int flp = 1; + if (flip) + { + r.Transpose(); + // flp=-1; + } + + CvMat cv_gl_mat; + cvInitMatHeader(&cv_gl_mat, 4, 4, CV_64F, gl_mat); + cvZero(&cv_gl_mat); + r.GetMatrix(&cv_gl_mat); + cvSet2D(&cv_gl_mat, 0, 3, cvScalar(flp * tra[0])); + cvSet2D(&cv_gl_mat, 1, 3, cvScalar(flp * tra[1])); + cvSet2D(&cv_gl_mat, 2, 3, cvScalar(flp * tra[2])); + cvSet2D(&cv_gl_mat, 3, 3, cvScalar(1)); + + cvTranspose(&cv_gl_mat, &cv_gl_mat); } -void Drawable::SetGLMatTraRod(double *tra, double *rod) +void Drawable::SetGLMatTraRod(double* tra, double* rod) { - // This is the OpenGL augmentation matrix - CvMat cv_gl_mat; - cvInitMatHeader(&cv_gl_mat, 4, 4, CV_64F, gl_mat); cvSetIdentity(&cv_gl_mat); - - // Figure out the rotation part - double rot_mat_data[3][3]; - CvMat rot_mat = cvMat(3, 3, CV_64F, rot_mat_data); - cvSetIdentity(&rot_mat); - if (rod != 0) - { - CvMat rod_mat; - cvInitMatHeader(&rod_mat, 3, 1, CV_64F, rod); - cvRodrigues2(&rod_mat, &rot_mat, 0); - } - - // Fill in the rotation part - cvmSet(&cv_gl_mat, 0, 0, cvmGet(&rot_mat, 0, 0)); - cvmSet(&cv_gl_mat, 0, 1, cvmGet(&rot_mat, 0, 1)); - cvmSet(&cv_gl_mat, 0, 2, cvmGet(&rot_mat, 0, 2)); - cvmSet(&cv_gl_mat, 1, 0, cvmGet(&rot_mat, 1, 0)); - cvmSet(&cv_gl_mat, 1, 1, cvmGet(&rot_mat, 1, 1)); - cvmSet(&cv_gl_mat, 1, 2, cvmGet(&rot_mat, 1, 2)); - cvmSet(&cv_gl_mat, 2, 0, cvmGet(&rot_mat, 2, 0)); - cvmSet(&cv_gl_mat, 2, 1, cvmGet(&rot_mat, 2, 1)); - cvmSet(&cv_gl_mat, 2, 2, cvmGet(&rot_mat, 2, 2)); - - // Fill in the translation part - cvSet2D(&cv_gl_mat, 0, 3, cvScalar(tra[0])); - cvSet2D(&cv_gl_mat, 1, 3, cvScalar(tra[1])); - cvSet2D(&cv_gl_mat, 2, 3, cvScalar(tra[2])); - - // Transpose into OpenGL presentation order - cvTranspose(&cv_gl_mat, &cv_gl_mat); + // This is the OpenGL augmentation matrix + CvMat cv_gl_mat; + cvInitMatHeader(&cv_gl_mat, 4, 4, CV_64F, gl_mat); + cvSetIdentity(&cv_gl_mat); + + // Figure out the rotation part + double rot_mat_data[3][3]; + CvMat rot_mat = cvMat(3, 3, CV_64F, rot_mat_data); + cvSetIdentity(&rot_mat); + if (rod != 0) + { + CvMat rod_mat; + cvInitMatHeader(&rod_mat, 3, 1, CV_64F, rod); + cvRodrigues2(&rod_mat, &rot_mat, 0); + } + + // Fill in the rotation part + cvmSet(&cv_gl_mat, 0, 0, cvmGet(&rot_mat, 0, 0)); + cvmSet(&cv_gl_mat, 0, 1, cvmGet(&rot_mat, 0, 1)); + cvmSet(&cv_gl_mat, 0, 2, cvmGet(&rot_mat, 0, 2)); + cvmSet(&cv_gl_mat, 1, 0, 
cvmGet(&rot_mat, 1, 0)); + cvmSet(&cv_gl_mat, 1, 1, cvmGet(&rot_mat, 1, 1)); + cvmSet(&cv_gl_mat, 1, 2, cvmGet(&rot_mat, 1, 2)); + cvmSet(&cv_gl_mat, 2, 0, cvmGet(&rot_mat, 2, 0)); + cvmSet(&cv_gl_mat, 2, 1, cvmGet(&rot_mat, 2, 1)); + cvmSet(&cv_gl_mat, 2, 2, cvmGet(&rot_mat, 2, 2)); + + // Fill in the translation part + cvSet2D(&cv_gl_mat, 0, 3, cvScalar(tra[0])); + cvSet2D(&cv_gl_mat, 1, 3, cvScalar(tra[1])); + cvSet2D(&cv_gl_mat, 2, 3, cvScalar(tra[2])); + + // Transpose into OpenGL presentation order + cvTranspose(&cv_gl_mat, &cv_gl_mat); } Mutex mutex_items; @@ -145,353 +151,360 @@ vector items; int cur_button; float elev = 0.0, azim = 0.0, rad = 0.0; float panx = 0.0, pany = 0.0; -float jaw = 0.0, jawx, jawy, jawz; +float jaw = 0.0, jawx, jawy, jawz; int ar_window; int vr_window; -float off_x=0, off_y=0; +float off_x = 0, off_y = 0; -unsigned char* image=0; +unsigned char* image = 0; int a_argc; -char **a_argv; +char** a_argv; int width; int height; Threads threads; double proj_mat[16]; -double modelview_mat[16] = { 1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1 }; +double modelview_mat[16] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }; -static void *glut_thread(void *lpThreadParameter) +static void* glut_thread(void* lpThreadParameter) { - //InitializeCriticalSection(&critical_section_items); + // InitializeCriticalSection(&critical_section_items); - glutInit(&a_argc, a_argv); - glutInitDisplayMode(GLUT_DEPTH | GLUT_RGB | GLUT_DOUBLE); - glutInitWindowSize(width, height); + glutInit(&a_argc, a_argv); + glutInitDisplayMode(GLUT_DEPTH | GLUT_RGB | GLUT_DOUBLE); + glutInitWindowSize(width, height); - ar_window = glutCreateWindow("AR"); - glutDisplayFunc(DrawAr); - glutSpecialFunc(KeyCallback); - glutPositionWindow(0, 0); + ar_window = glutCreateWindow("AR"); + glutDisplayFunc(DrawAr); + glutSpecialFunc(KeyCallback); + glutPositionWindow(0, 0); - vr_window = glutCreateWindow("VR"); - glutDisplayFunc(DrawVr); - glutPositionWindow(0, height); + vr_window = glutCreateWindow("VR"); + glutDisplayFunc(DrawVr); + glutPositionWindow(0, height); - glEnable(GL_CULL_FACE); - glEnable(GL_DEPTH_TEST); - glEnable(GL_COLOR_MATERIAL); + glEnable(GL_CULL_FACE); + glEnable(GL_DEPTH_TEST); + glEnable(GL_COLOR_MATERIAL); - glutMouseFunc(Mouse); - glutMotionFunc(Motion); + glutMouseFunc(Mouse); + glutMotionFunc(Motion); - atexit(Exit); + atexit(Exit); - glutMainLoop(); - return 0; + glutMainLoop(); + return 0; } void GlutViewer::KeyCallback(int key, int x, int y) { - switch(key) - { - case GLUT_KEY_LEFT: - off_x-=1; - break; - case GLUT_KEY_RIGHT: - off_x+=1; - break; - case GLUT_KEY_UP: - off_y-=1; - break; - case GLUT_KEY_DOWN: - off_y+=1; - break; - - } + switch (key) + { + case GLUT_KEY_LEFT: + off_x -= 1; + break; + case GLUT_KEY_RIGHT: + off_x += 1; + break; + case GLUT_KEY_UP: + off_y -= 1; + break; + case GLUT_KEY_DOWN: + off_y += 1; + break; + } } double GlutViewer::GetXOffset() { - return off_x; + return off_x; } double GlutViewer::GetYOffset() { - return off_y; + return off_y; } void GlutViewer::Start(int argc, char** argv, int w, int h, float r) { - a_argc = argc; - a_argv = argv; - width = w; - height = h; - rad = r; + a_argc = argc; + a_argv = argv; + width = w; + height = h; + rad = r; - threads.create(glut_thread, 0); + threads.create(glut_thread, 0); } void GlutViewer::DrawFloor() { - glColor3f(0.5, 0.5, 1.0); - - glBegin(GL_LINES); - for(int i = -20; i <= 20; i+=1) - { - glVertex3f((float)i, 0.0f, -20); - glVertex3f((float)i, 0.0f, 20); - glVertex3f(-20, 0.0f, (float)i); - glVertex3f( 20, 0.0f, 
(float)i); - } - glEnd(); + glColor3f(0.5, 0.5, 1.0); + + glBegin(GL_LINES); + for (int i = -20; i <= 20; i += 1) + { + glVertex3f((float)i, 0.0f, -20); + glVertex3f((float)i, 0.0f, 20); + glVertex3f(-20, 0.0f, (float)i); + glVertex3f(20, 0.0f, (float)i); + } + glEnd(); } void GlutViewer::Mouse(int button, int state, int x, int y) { - cur_button = button; + cur_button = button; } void GlutViewer::DrawAxis(float scale) { - glColor3f(1.0, 0.0, 0.0); - glBegin(GL_LINES); - glVertex3f(0.0, 0.0, 0.0); - glVertex3f(scale, 0.0, 0.0); - glEnd(); - glColor3f(0.0, 1.0, 0.0); - glBegin(GL_LINES); - glVertex3f(0.0, 0.0, 0.0); - glVertex3f(0.0, scale, 0.0); - glEnd(); - glColor3f(0.0, 0.0, 1.0); - glBegin(GL_LINES); - glVertex3f(0.0, 0.0, 0.0); - glVertex3f(0.0, 0.0, scale); - glEnd(); + glColor3f(1.0, 0.0, 0.0); + glBegin(GL_LINES); + glVertex3f(0.0, 0.0, 0.0); + glVertex3f(scale, 0.0, 0.0); + glEnd(); + glColor3f(0.0, 1.0, 0.0); + glBegin(GL_LINES); + glVertex3f(0.0, 0.0, 0.0); + glVertex3f(0.0, scale, 0.0); + glEnd(); + glColor3f(0.0, 0.0, 1.0); + glBegin(GL_LINES); + glVertex3f(0.0, 0.0, 0.0); + glVertex3f(0.0, 0.0, scale); + glEnd(); } void GlutViewer::Motion(int x, int y) { - static int oldx, oldy; - - int dx = oldx-x; - int dy = oldy-y; - - switch(cur_button) - { - case GLUT_LEFT_BUTTON : - if (abs(dx)>abs(dy)) - { - if (dx>0) - azim += 3.0; - else if (dx<0) - azim -= 3.0; - } - else if (dy>0) - elev -= 3.0; - else if (dy<0) - elev += 3.0; - break; - - case GLUT_MIDDLE_BUTTON : - if (abs(dx)>abs(dy)) - { - if (dx>0) - panx += 10.5; - else if (dx<0) - panx -= 10.5; - } - else if (dy>0) - pany -= 10.5; - else if (dy<0) - pany += 10.5; - break;//?? - - case GLUT_RIGHT_BUTTON : - if (dy > 0) - rad += (10.2); - else if (dy < 0) - rad -= (10.2); - break; - - default: - break; - } - - oldx = x; - oldy = y; + static int oldx, oldy; + + int dx = oldx - x; + int dy = oldy - y; + + switch (cur_button) + { + case GLUT_LEFT_BUTTON: + if (abs(dx) > abs(dy)) + { + if (dx > 0) + azim += 3.0; + else if (dx < 0) + azim -= 3.0; + } + else if (dy > 0) + elev -= 3.0; + else if (dy < 0) + elev += 3.0; + break; + + case GLUT_MIDDLE_BUTTON: + if (abs(dx) > abs(dy)) + { + if (dx > 0) + panx += 10.5; + else if (dx < 0) + panx -= 10.5; + } + else if (dy > 0) + pany -= 10.5; + else if (dy < 0) + pany += 10.5; + break; //?? 
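Both SetGLMatTraQuat and SetGLMatTraRod in the hunk above end with a transpose because the 4x4 pose is filled in row-major order while `glMultMatrixd` consumes column-major storage. A standalone sketch of that layout conversion with plain arrays (`pose_to_gl` is a hypothetical helper, not part of alvar):

```
#include <cstdio>

// Build the homogeneous matrix [R | t; 0 0 0 1] row-major, then transpose
// into the column-major layout OpenGL expects.
void pose_to_gl(const double R[3][3], const double t[3], double gl[16])
{
  double m[4][4] = {};
  for (int r = 0; r < 3; ++r)
  {
    for (int c = 0; c < 3; ++c)
      m[r][c] = R[r][c];
    m[r][3] = t[r];
  }
  m[3][3] = 1.0;
  for (int r = 0; r < 4; ++r)
    for (int c = 0; c < 4; ++c)
      gl[c * 4 + r] = m[r][c];  // element (r,c) lands at column-major index
}

int main()
{
  const double R[3][3] = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } };
  const double t[3] = { 10, 20, 30 };
  double gl[16];
  pose_to_gl(R, t, gl);
  std::printf("%.0f %.0f %.0f\n", gl[12], gl[13], gl[14]);  // 10 20 30
}
```

The translation ending up at indices 12..14 is the usual sanity check for column-major packing.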
+ + case GLUT_RIGHT_BUTTON: + if (dy > 0) + rad += (10.2); + else if (dy < 0) + rad -= (10.2); + break; + + default: + break; + } + + oldx = x; + oldy = y; } - void GlutViewer::DrawContent() { - DrawAxis(100.f); - Lock lock(&mutex_items); - for (unsigned i = 0; i < items.size(); ++i) { - items[i]->Draw(); - } + DrawAxis(100.f); + Lock lock(&mutex_items); + for (unsigned i = 0; i < items.size(); ++i) + { + items[i]->Draw(); + } } - void GlutViewer::DrawVr() { - glutSetWindow(vr_window); - glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT); - glClearColor(0.5, 0.2, 0.2, 1.0); - - //glViewport(0, 0, w, h); - glMatrixMode(GL_PROJECTION); - glLoadIdentity(); - //gluPerspective(70, 1, 0.001, 5000); - - glMatrixMode(GL_MODELVIEW); - glLoadIdentity(); - - glTranslatef(panx, pany, -rad); - glRotatef(-elev, 1.0, 0.0, 0.0); - glRotatef( azim, 0.0, 1.0, 0.0); - - float pos[4] = {50, 0, 50}; - glLightfv(GL_LIGHT0, GL_POSITION, pos); - - DrawContent(); - - //glFlush(); - glutSwapBuffers(); - glutPostRedisplay(); + glutSetWindow(vr_window); + glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT); + glClearColor(0.5, 0.2, 0.2, 1.0); + + // glViewport(0, 0, w, h); + glMatrixMode(GL_PROJECTION); + glLoadIdentity(); + // gluPerspective(70, 1, 0.001, 5000); + + glMatrixMode(GL_MODELVIEW); + glLoadIdentity(); + + glTranslatef(panx, pany, -rad); + glRotatef(-elev, 1.0, 0.0, 0.0); + glRotatef(azim, 0.0, 1.0, 0.0); + + float pos[4] = { 50, 0, 50 }; + glLightfv(GL_LIGHT0, GL_POSITION, pos); + + DrawContent(); + + // glFlush(); + glutSwapBuffers(); + glutPostRedisplay(); } void GlutViewer::DrawAr() { - glutSetWindow(ar_window); - glClearColor(0.2, 0.5, 0.2, 1.0); - glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT); + glutSetWindow(ar_window); + glClearColor(0.2, 0.5, 0.2, 1.0); + glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT); - DrawVideo(); + DrawVideo(); - glMatrixMode(GL_PROJECTION); - glLoadMatrixd(proj_mat); + glMatrixMode(GL_PROJECTION); + glLoadMatrixd(proj_mat); - glMatrixMode(GL_MODELVIEW); - glLoadMatrixd(modelview_mat); + glMatrixMode(GL_MODELVIEW); + glLoadMatrixd(modelview_mat); - DrawContent(); + DrawContent(); - glutSwapBuffers(); - glutPostRedisplay(); + glutSwapBuffers(); + glutPostRedisplay(); } -void GlutViewer::SetGlProjectionMatrix(double p[16]) { - memcpy(proj_mat, p, sizeof(double)*16); +void GlutViewer::SetGlProjectionMatrix(double p[16]) +{ + memcpy(proj_mat, p, sizeof(double) * 16); } -void GlutViewer::SetGlModelviewMatrix(double p[16]) { - memcpy(modelview_mat, p, sizeof(double)*16); +void GlutViewer::SetGlModelviewMatrix(double p[16]) +{ + memcpy(modelview_mat, p, sizeof(double) * 16); } void GlutViewer::Reshape(int w, int h) { - h = (h == 0 ? 1 : h); - //glViewport(0, 0, w, h); + h = (h == 0 ? 
1 : h); + // glViewport(0, 0, w, h); - //glMatrixMode(GL_PROJECTION); + // glMatrixMode(GL_PROJECTION); - //glLoadIdentity(); - //glMultMatrixd(projmat); + // glLoadIdentity(); + // glMultMatrixd(projmat); - //glMatrixMode(GL_MODELVIEW); + // glMatrixMode(GL_MODELVIEW); - //glutPostRedisplay(); + // glutPostRedisplay(); } void GlutViewer::Exit() { - //DeleteCriticalSection(&critical_section_items); + // DeleteCriticalSection(&critical_section_items); } -void GlutViewer::DrawableClear() { - Lock lock(&mutex_items); - items.clear(); +void GlutViewer::DrawableClear() +{ + Lock lock(&mutex_items); + items.clear(); } void GlutViewer::DrawableAdd(Drawable* item) { - Lock lock(&mutex_items); - items.push_back(item); + Lock lock(&mutex_items); + items.push_back(item); } // TODO ensure that threading doesn't cause any problems... void GlutViewer::SetVideo(const IplImage* _image) { - image = (unsigned char*)_image->imageData; + image = (unsigned char*)_image->imageData; } void GlutViewer::DrawVideo() { - if(!image) return; - - glDepthMask(GL_FALSE); - glDisable(GL_DEPTH_TEST); - - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); - - glColor3f(1, 1, 1); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 512, 512, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, image); - - glEnable(GL_TEXTURE_2D); - - glMatrixMode(GL_PROJECTION); - glPushMatrix(); - glLoadIdentity(); - glOrtho(0, 1, 0, 1, 0, 1); - - glMatrixMode(GL_MODELVIEW); - glLoadIdentity(); - - glBegin( GL_QUADS ); - glTexCoord2d(0.0,1.0); glVertex2d(0.0,0.0); - glTexCoord2d(1.0,1.0); glVertex2d(1.0,0.0); - glTexCoord2d(1.0,0.0); glVertex2d(1.0,1.0); - glTexCoord2d(0.0,0.0); glVertex2d(0.0,1.0); - glEnd(); - - glDisable(GL_TEXTURE_2D); - glDepthMask(GL_TRUE); - glEnable(GL_DEPTH_TEST); - - glMatrixMode(GL_PROJECTION); - glPopMatrix(); + if (!image) + return; + + glDepthMask(GL_FALSE); + glDisable(GL_DEPTH_TEST); + + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + + glColor3f(1, 1, 1); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 512, 512, 0, GL_BGR_EXT, + GL_UNSIGNED_BYTE, image); + + glEnable(GL_TEXTURE_2D); + + glMatrixMode(GL_PROJECTION); + glPushMatrix(); + glLoadIdentity(); + glOrtho(0, 1, 0, 1, 0, 1); + + glMatrixMode(GL_MODELVIEW); + glLoadIdentity(); + + glBegin(GL_QUADS); + glTexCoord2d(0.0, 1.0); + glVertex2d(0.0, 0.0); + glTexCoord2d(1.0, 1.0); + glVertex2d(1.0, 0.0); + glTexCoord2d(1.0, 0.0); + glVertex2d(1.0, 1.0); + glTexCoord2d(0.0, 0.0); + glVertex2d(0.0, 1.0); + glEnd(); + + glDisable(GL_TEXTURE_2D); + glDepthMask(GL_TRUE); + glEnable(GL_DEPTH_TEST); + + glMatrixMode(GL_PROJECTION); + glPopMatrix(); } /* void GlutViewer::Init(int argc, char** argv, int w, int h) { - glutInit(&argc, argv); - glutInitDisplayMode(GLUT_DEPTH | GLUT_RGB | GLUT_DOUBLE); - glutInitWindowSize(w, h); - - ar_window = glutCreateWindow("AR"); - glutDisplayFunc(DrawAr); - //glutReshapeFunc(Reshape); - - vr_window = glutCreateWindow("VR"); - glutDisplayFunc(DrawVr); - //glutReshapeFunc(Reshape); - - glEnable(GL_CULL_FACE); - glEnable(GL_DEPTH_TEST); - glEnable(GL_COLOR_MATERIAL); - - glutMouseFunc(Mouse); - glutMotionFunc(Motion); - - atexit(Exit); + 
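The VR view composed in DrawVr (translate by `-rad`, then rotate by elevation about X and azimuth about Y, driven by the mouse handlers above) is an orbit camera around the origin. As a standalone illustration of the underlying geometry, here is one common spherical parameterization of the eye point; the exact axis and sign conventions of this file's GL calls may differ, so treat it as a sketch rather than a drop-in equivalent:

```
#include <cmath>
#include <cstdio>

// Eye position on a sphere of radius `rad` around the target, given
// azimuth/elevation in degrees (generic convention, signs may vary).
void orbit_eye(double azim_deg, double elev_deg, double rad, double eye[3])
{
  const double kDegToRad = 3.14159265358979323846 / 180.0;
  double az = azim_deg * kDegToRad;
  double el = elev_deg * kDegToRad;
  eye[0] = rad * std::cos(el) * std::sin(az);
  eye[1] = rad * std::sin(el);
  eye[2] = rad * std::cos(el) * std::cos(az);
}

int main()
{
  double eye[3];
  orbit_eye(45.0, 30.0, 100.0, eye);
  std::printf("eye = (%.1f, %.1f, %.1f)\n", eye[0], eye[1], eye[2]);
}
```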
glutInit(&argc, argv); + glutInitDisplayMode(GLUT_DEPTH | GLUT_RGB | GLUT_DOUBLE); + glutInitWindowSize(w, h); + + ar_window = glutCreateWindow("AR"); + glutDisplayFunc(DrawAr); + //glutReshapeFunc(Reshape); + + vr_window = glutCreateWindow("VR"); + glutDisplayFunc(DrawVr); + //glutReshapeFunc(Reshape); + + glEnable(GL_CULL_FACE); + glEnable(GL_DEPTH_TEST); + glEnable(GL_COLOR_MATERIAL); + + glutMouseFunc(Mouse); + glutMotionFunc(Motion); + + atexit(Exit); } */ diff --git a/ar_track_alvar/src/IntegralImage.cpp b/ar_track_alvar/src/IntegralImage.cpp index 40576f5..cae773b 100644 --- a/ar_track_alvar/src/IntegralImage.cpp +++ b/ar_track_alvar/src/IntegralImage.cpp @@ -23,172 +23,207 @@ #include "IntegralImage.h" -namespace alvar { - -void IntIndex::update_next_step() { - next_step = step; - estep += step_remainder; - if (estep >= steps) { - estep -= steps; - next_step++; - } +namespace alvar +{ +void IntIndex::update_next_step() +{ + next_step = step; + estep += step_remainder; + if (estep >= steps) + { + estep -= steps; + next_step++; + } } -IntIndex::IntIndex(int _res, int _steps) { - res = _res; - steps = _steps; - operator=(0); -} -int IntIndex::operator=(int v) { - index = 0; - step = res / steps; - step_remainder = res % steps; - estep = 0; - update_next_step(); - while ((index+next_step-1) < v) next(); - return index; -} -int IntIndex::next() { - index += next_step; - update_next_step(); - return index; -} -int IntIndex::get() const { - return index; -} -int IntIndex::get_next_step() const { - return next_step; -} -int IntIndex::end() const { - return res; +IntIndex::IntIndex(int _res, int _steps) +{ + res = _res; + steps = _steps; + operator=(0); +} +int IntIndex::operator=(int v) +{ + index = 0; + step = res / steps; + step_remainder = res % steps; + estep = 0; + update_next_step(); + while ((index + next_step - 1) < v) + next(); + return index; +} +int IntIndex::next() +{ + index += next_step; + update_next_step(); + return index; +} +int IntIndex::get() const +{ + return index; +} +int IntIndex::get_next_step() const +{ + return next_step; +} +int IntIndex::end() const +{ + return res; } -IntegralImage::IntegralImage() { - sum = 0; -} -IntegralImage::~IntegralImage() { - if (sum) cvReleaseImage(&sum); -} -void IntegralImage::Update(IplImage *gray) { - if ((sum == 0) || - (sum->height != gray->width+1) || - (sum->width != gray->height+1)) - { - if (sum) cvReleaseImage(&sum); - // TODO: Now we assume 'double' - is it ok? - sum = cvCreateImage(cvSize(gray->width+1, gray->height+1), IPL_DEPTH_64F, 1); - } - cvIntegral(gray, sum); -} -double IntegralImage::GetSum(CvRect &rect, int *count /*=0*/) { - int x1 = rect.x; - int x2 = rect.x + rect.width; // Note, not -1 - int y1 = rect.y; - int y2 = rect.y + rect.height; - //cout<height != gray->width + 1) || + (sum->width != gray->height + 1)) + { + if (sum) + cvReleaseImage(&sum); + // TODO: Now we assume 'double' - is it ok? 
+ sum = cvCreateImage(cvSize(gray->width + 1, gray->height + 1), + IPL_DEPTH_64F, 1); + } + cvIntegral(gray, sum); +} +double IntegralImage::GetSum(CvRect& rect, int* count /*=0*/) +{ + int x1 = rect.x; + int x2 = rect.x + rect.width; // Note, not -1 + int y1 = rect.y; + int y2 = rect.y + rect.height; + // cout<imageData)[y2*sum->width+x2] - -((double *)sum->imageData)[y2*sum->width+x1] - -((double *)sum->imageData)[y1*sum->width+x2] - +((double *)sum->imageData)[y1*sum->width+x1]; + double v = +((double*)sum->imageData)[y2 * sum->width + x2] - + ((double*)sum->imageData)[y2 * sum->width + x1] - + ((double*)sum->imageData)[y1 * sum->width + x2] + + ((double*)sum->imageData)[y1 * sum->width + x1]; - if (count) *count = rect.width*rect.height; - return v; -} -double IntegralImage::GetAve(CvRect &rect) { - int count=1; - return GetSum(rect, &count)/count; -} -void IntegralImage::GetSubimage(const CvRect &rect, IplImage *sub) { - int yi=0; - for (IntIndex yy(rect.height, sub->height); yy.get() != yy.end(); yy.next(),yi++) { - int xi=0; - for (IntIndex xx(rect.width, sub->width); xx.get() != xx.end(); xx.next(),xi++) { - //cout<<"res: "<height<<","<width<<" - "; - //cout<height); yy.get() != yy.end(); + yy.next(), yi++) + { + int xi = 0; + for (IntIndex xx(rect.width, sub->width); xx.get() != xx.end(); + xx.next(), xi++) + { + // cout<<"res: "<height<<","<width<<" - "; + // cout<imageData[yi*sub->widthStep+xi] = (char)ave; - } - } -} -void IntegralGradient::CalculatePointNormals(IplImage *gray) { - int width = gray->width-1; - int height = gray->height-1; - if ((normalx == 0) || - (normalx->width != width) || - (normalx->height != height)) - { - if (normalx) cvReleaseImage(&normalx); - if (normaly) cvReleaseImage(&normaly); - normalx = cvCreateImage(cvSize(width, height), IPL_DEPTH_64F, 1); - normaly = cvCreateImage(cvSize(width, height), IPL_DEPTH_64F, 1); - } - for (int j=0; jimageData[yi * sub->widthStep + xi] = (char)ave; + } + } +} +void IntegralGradient::CalculatePointNormals(IplImage* gray) +{ + int width = gray->width - 1; + int height = gray->height - 1; + if ((normalx == 0) || (normalx->width != width) || + (normalx->height != height)) + { + if (normalx) + cvReleaseImage(&normalx); + if (normaly) + cvReleaseImage(&normaly); + normalx = cvCreateImage(cvSize(width, height), IPL_DEPTH_64F, 1); + normaly = cvCreateImage(cvSize(width, height), IPL_DEPTH_64F, 1); + } + for (int j = 0; j < height; j++) + { + for (int i = 0; i < width; i++) + { + /* + // As we assume top-left coordinates we have these reverse compared + to Donahue1992 double a4 = cvGet2D(gray, j, i+1).val[0]; double a3 = + cvGet2D(gray, j, i).val[0]; double a2 = cvGet2D(gray, j+1, i).val[0]; double a1 = cvGet2D(gray, j+1, i+1).val[0]; // Normal vectors; - double nx = (-a1+a2+a3-a4)/4; + double nx = (-a1+a2+a3-a4)/4; double ny = (-a1-a2+a3+a4)/4; cvSet2D(normalx, j, i, cvScalar(nx)); cvSet2D(normaly, j, i, cvScalar(ny)); - */ - // As we assume top-left coordinates we have these reverse compared to Donahue1992 - // TODO: Now we assume 8-bit gray - double a4 = (unsigned char)gray->imageData[(j)*gray->widthStep+(i+1)]; - double a3 = (unsigned char)gray->imageData[(j)*gray->widthStep+(i)]; - double a2 = (unsigned char)gray->imageData[(j+1)*gray->widthStep+(i)]; - double a1 = (unsigned char)gray->imageData[(j+1)*gray->widthStep+(i+1)]; - // Normal vectors; - double nx = (-a1+a2+a3-a4)/4; - double ny = (-a1-a2+a3+a4)/4; - ((double *)normalx->imageData)[j*normalx->width+i] = nx; - ((double *)normaly->imageData)[j*normaly->width+i] = 
ny; - } - } -} -IntegralGradient::IntegralGradient() { - normalx = 0; - normaly = 0; -} -IntegralGradient::~IntegralGradient() { - if (normalx) cvReleaseImage(&normalx); - if (normaly) cvReleaseImage(&normaly); -} -void IntegralGradient::Update(IplImage *gray) { - CalculatePointNormals(gray); - integx.Update(normalx); - integy.Update(normaly); -} -void IntegralGradient::GetGradient(CvRect &rect, double *dirx, double *diry, int *count /*=0*/) { - CvRect r = {rect.x, rect.y, rect.width-1, rect.height-1}; - if (count) *dirx = integx.GetSum(r, count); - else *dirx = integx.GetSum(r); - *diry = integy.GetSum(r); -} -void IntegralGradient::GetAveGradient(CvRect &rect, double *dirx, double *diry) { - int count=1; - GetGradient(rect, dirx, diry, &count); - *dirx /= count; - *diry /= count; + */ + // As we assume top-left coordinates we have these reverse compared to + // Donahue1992 + // TODO: Now we assume 8-bit gray + double a4 = (unsigned char)gray->imageData[(j)*gray->widthStep + (i + 1)]; + double a3 = (unsigned char)gray->imageData[(j)*gray->widthStep + (i)]; + double a2 = + (unsigned char)gray->imageData[(j + 1) * gray->widthStep + (i)]; + double a1 = + (unsigned char)gray->imageData[(j + 1) * gray->widthStep + (i + 1)]; + // Normal vectors; + double nx = (-a1 + a2 + a3 - a4) / 4; + double ny = (-a1 - a2 + a3 + a4) / 4; + ((double*)normalx->imageData)[j * normalx->width + i] = nx; + ((double*)normaly->imageData)[j * normaly->width + i] = ny; + } + } +} +IntegralGradient::IntegralGradient() +{ + normalx = 0; + normaly = 0; +} +IntegralGradient::~IntegralGradient() +{ + if (normalx) + cvReleaseImage(&normalx); + if (normaly) + cvReleaseImage(&normaly); +} +void IntegralGradient::Update(IplImage* gray) +{ + CalculatePointNormals(gray); + integx.Update(normalx); + integy.Update(normaly); +} +void IntegralGradient::GetGradient(CvRect& rect, double* dirx, double* diry, + int* count /*=0*/) +{ + CvRect r = { rect.x, rect.y, rect.width - 1, rect.height - 1 }; + if (count) + *dirx = integx.GetSum(r, count); + else + *dirx = integx.GetSum(r); + *diry = integy.GetSum(r); +} +void IntegralGradient::GetAveGradient(CvRect& rect, double* dirx, double* diry) +{ + int count = 1; + GetGradient(rect, dirx, diry, &count); + *dirx /= count; + *diry /= count; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Kalman.cpp b/ar_track_alvar/src/Kalman.cpp index 33656c6..c8e7c84 100644 --- a/ar_track_alvar/src/Kalman.cpp +++ b/ar_track_alvar/src/Kalman.cpp @@ -22,419 +22,510 @@ */ #include -#include // for std::max -#include "cxcore.h" -#include "cv.h" -#include "highgui.h" +#include // for std::max +#include +#include +#include #include "ar_track_alvar/Kalman.h" #include "ar_track_alvar/Util.h" #include "ar_track_alvar/Alvar.h" -namespace alvar { - -KalmanSensorCore::KalmanSensorCore(const KalmanSensorCore &k) { - m = k.m; - n = k.n; - z = cvCloneMat(k.z); - H = cvCloneMat(k.H); - H_trans = cvCloneMat(k.H_trans); - K = cvCloneMat(k.K); - z_pred = cvCloneMat(k.z_pred); - z_residual = cvCloneMat(k.z_residual); - x_gain = cvCloneMat(k.x_gain); -} - -KalmanSensorCore::KalmanSensorCore(int _n, int _m) { - n = _n; - m = _m; - z = cvCreateMat(m,1,CV_64FC1); cvSetZero(z); - H = cvCreateMat(m,n,CV_64FC1); cvSetZero(H); - H_trans = cvCreateMat(n,m,CV_64FC1); cvSetZero(H_trans); - K = cvCreateMat(n,m,CV_64FC1); cvSetZero(K); - z_pred = cvCreateMat(m,1,CV_64FC1); cvSetZero(z_pred); - z_residual = cvCreateMat(m,1,CV_64FC1); cvSetZero(z_residual); - x_gain = cvCreateMat(n,1,CV_64FC1); cvSetZero(x_gain); 
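IntegralImage::GetSum in the hunk above reads four entries of the summed-area table instead of scanning `rect.width * rect.height` pixels. A minimal OpenCV sketch of the same four-corner lookup (assuming the modern `cv::integral` API, which pads the sum image by one row and column, so the lookups below are exactly the ones GetSum performs):

```
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

int main()
{
  cv::Mat gray = (cv::Mat_<uchar>(3, 3) << 1, 2, 3,
                                           4, 5, 6,
                                           7, 8, 9);
  cv::Mat sum;
  cv::integral(gray, sum, CV_64F);  // (h+1) x (w+1), CV_64F

  cv::Rect rect(1, 1, 2, 2);        // the {5, 6, 8, 9} block
  double v = sum.at<double>(rect.y + rect.height, rect.x + rect.width)
           - sum.at<double>(rect.y + rect.height, rect.x)
           - sum.at<double>(rect.y, rect.x + rect.width)
           + sum.at<double>(rect.y, rect.x);
  std::cout << v << std::endl;      // prints 28
}
```

IntegralGradient applies the same trick to the per-pixel normal images, so an average gradient over any rectangle also costs four lookups per axis.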
-} - -KalmanSensorCore::~KalmanSensorCore() { - cvReleaseMat(&z); - cvReleaseMat(&H); - cvReleaseMat(&H_trans); - cvReleaseMat(&K); - cvReleaseMat(&z_pred); - cvReleaseMat(&z_residual); - cvReleaseMat(&x_gain); -} - -void KalmanSensorCore::update_x(CvMat *x_pred, CvMat *x) { - // x = x_pred + K * (z - H*x_pred) - cvMatMul(H, x_pred, z_pred); - cvScaleAdd(z_pred, cvScalar(-1), z, z_residual); - cvMatMul(K, z_residual, x_gain); - cvScaleAdd(x_pred, cvScalar(1), x_gain, x); -} - -void KalmanCore::predict_x(unsigned long tick) { - // x_pred = F * x; - cvMatMul(F, x, x_pred); -} - -KalmanCore::KalmanCore(const KalmanCore &s) { - n = s.n; - x = cvCloneMat(s.x); - F = cvCloneMat(s.F); - x_pred = cvCloneMat(s.x_pred); - F_trans = cvCloneMat(s.F_trans); -} - -KalmanCore::KalmanCore(int _n) { - n = _n; - x = cvCreateMat(n,1,CV_64FC1); cvSetZero(x); - F = cvCreateMat(n,n,CV_64FC1); cvSetIdentity(F); - F_trans = cvCreateMat(n,n,CV_64FC1); cvSetIdentity(F_trans); - x_pred = cvCreateMat(n,1,CV_64FC1); cvSetZero(x_pred); -} - -KalmanCore::~KalmanCore() { - cvReleaseMat(&x); - cvReleaseMat(&F); - cvReleaseMat(&F_trans); - cvReleaseMat(&x_pred); -} - -CvMat *KalmanCore::predict() { - predict_x(0); - return x_pred; -} - -CvMat *KalmanCore::predict_update(KalmanSensorCore *sensor) { - predict(); - sensor->update_x(x_pred, x); - return x; -} - -KalmanSensor::KalmanSensor(const KalmanSensor &k) : KalmanSensorCore(k) { - R = cvCloneMat(k.R); - R_tmp = cvCloneMat(k.R_tmp); - P_tmp = cvCloneMat(k.P_tmp); -} - -KalmanSensor::KalmanSensor(int n, int _m) : KalmanSensorCore(n, _m) { - R = cvCreateMat(m,m,CV_64FC1); cvSetZero(R); - R_tmp = cvCreateMat(m,m,CV_64FC1); cvSetZero(R); - P_tmp = cvCreateMat(n,n,CV_64FC1); cvSetZero(P_tmp); -} - -KalmanSensor::~KalmanSensor() { - cvReleaseMat(&R); - cvReleaseMat(&R_tmp); - cvReleaseMat(&P_tmp); -} - -void KalmanSensor::update_K(CvMat *P_pred) { - // K = P * trans(H) * inv(H*P*trans(H) + R) - cvTranspose(H, H_trans); - cvMatMul(P_pred, H_trans, K); - cvMatMul(H, K, R_tmp); - cvScaleAdd(R_tmp, cvScalar(1), R, R_tmp); - cvInvert(R_tmp, R_tmp); - cvMatMul(H_trans, R_tmp, K); - cvMatMul(P_pred, K, K); -} - -void KalmanSensor::update_P(CvMat *P_pred, CvMat *P) { - //P = (I - K*H) * P_pred - cvMatMul(K, H, P_tmp); - cvSetIdentity(P); - cvScaleAdd(P_tmp, cvScalar(-1), P, P); - cvMatMul(P, P_pred, P); -} - -void Kalman::predict_P() { - // P_pred = F*P*trans(F) + Q - cvTranspose(F, F_trans); - cvMatMul(P, F_trans, P_pred); - cvMatMul(F, P_pred, P_pred); - cvScaleAdd(P_pred, cvScalar(1), Q, P_pred); -} - -Kalman::Kalman(int _n) : KalmanCore(_n) { - prev_tick = 0; - Q = cvCreateMat(n,n,CV_64FC1); cvSetZero(Q); - P = cvCreateMat(n,n,CV_64FC1); cvSetZero(P); - P_pred = cvCreateMat(n,n,CV_64FC1); cvSetZero(P_pred); -} - -Kalman::~Kalman() { - cvReleaseMat(&Q); - cvReleaseMat(&P); - cvReleaseMat(&P_pred); -} - -void Kalman::update_F(unsigned long tick) { - //cvSetIdentity(F); -} - -CvMat *Kalman::predict(unsigned long tick) { - update_F(tick); - predict_x(tick); - predict_P(); - return x_pred; -} - -CvMat *Kalman::predict_update(KalmanSensor *sensor, unsigned long tick) { - predict(tick); - sensor->update_H(x_pred); - sensor->update_K(P_pred); - sensor->update_x(x_pred, x); - sensor->update_P(P_pred, P); - prev_tick = tick; - return x; -} - -double Kalman::seconds_since_update(unsigned long tick) { - unsigned long tick_diff = (prev_tick ? 
tick-prev_tick : 0); - return ((double)tick_diff/1000.0); -} - -void KalmanSensorEkf::update_H(CvMat *x_pred) { - // By default we update the H by calculating Jacobian numerically - const double step = 0.000001; - cvZero(H); - for (int i=0; icols, mat->rows)); - for (int j=0; jrows; j++) { - for (int i=0; icols; i++) { - double d = cvGet2D(mat, j, i).val[0]; - if (d < 0) d=-d; - double c1=0, c2=0, c3=0; - if (d < 0.1) { - c1 = 0 + ((d - 0.0)/(0.1 - 0.0)*(127 - 0)); - } else if(d < 1.0) { - c1 = 127 + ((d - 0.1)/(1.0 - 0.1)*(255 - 127)); - } else if (d < 10.0) { - c1 = 255; - c2 = 0 + ((d - 1.0)/(10.0 - 1.0)*(255 - 0)); - } else if (d < 100.0) { - c1 = 255; - c2 = 255; - c3 = 0 + ((d - 10.0)/(100.0 - 10.0)*(255 - 0)); - } else { - c1 = 255; c2 = 255; c3 = 255; - } - if (d < 0) { - cvSet2D(img, j, i, cvScalar(c3, c2, c1)); // BRG - } else { - cvSet2D(img, j, i, cvScalar(c2, c1, c3)); // BGR - } - } - } - cvResetImageROI(img); -} - -void KalmanVisualize::Init() { - n = kalman->get_n(); - m = sensor->get_m(); - int img_width = std::max(3+n+3+n+5+m+5, 1+n+1+n+1+n+1+m+1+n+1); - int img_height = 1+n+1+std::max(n, m+1+m)+1; - img = cvCreateImage(cvSize(img_width, img_height), IPL_DEPTH_8U, 3); - cvSet(img, cvScalar(64,64,64)); - img_legend = cvLoadImage("Legend.png"); - if (img_legend) { - for (img_scale=1; img_scale<50; img_scale++) { - if (img_scale*img_width > img_legend->width) { - break; - } - } - img_show = cvCreateImage(cvSize(img_width*img_scale, img_legend->height + img_height*img_scale), IPL_DEPTH_8U, 3); - cvSet(img_show, cvScalar(64,64,64)); - cvSetImageROI(img_show, cvRect(0, 0, img_legend->width, img_legend->height)); - cvCopy(img_legend, img_show); - cvResetImageROI(img_show); - cvNamedWindow("KalmanVisualize"); - } else { - img_scale = 1; - img_show = cvCreateImage(cvSize(img_width*img_scale, img_height*img_scale), IPL_DEPTH_8U, 3); - cvSet(img_show, cvScalar(64,64,64)); - cvNamedWindow("KalmanVisualize",0); - } -} - -void KalmanVisualize::out_matrix(CvMat *m, char *name) { - if (m->cols == 1) { - std::cout<rows; j++) { - std::cout<<" "<rows == 1) { - std::cout<cols; i++) { - std::cout<<" "<rows; j++) { - for (int i=0; icols; i++) { - std::cout<<" "<x, 1, 1); // 1 - if (kalman_ext && sensor_ext) { - int y = std::max(2+n, 3+m+m); - img_matrix(kalman_ext->P, 1, y); // n - } -} - -void KalmanVisualize::update_post() { - img_matrix(kalman->F, 3, 1); // n - img_matrix(kalman->x_pred, 4+n, 1); // 1 - img_matrix(sensor->H, 6+n, 1); // n - img_matrix(sensor->z_pred, 7+n+n, 1); // 1 - img_matrix(sensor->z, 7+n+n, 2 + m); - img_matrix(sensor->z_residual, 9+n+n, 1); // 1 - img_matrix(sensor->K, 11+n+n, 1); // m - img_matrix(sensor->x_gain, 12+n+n+m, 1); // 1 - img_matrix(kalman->x, 14+n+n+m, 1); // 1 - if (kalman_ext && sensor_ext) { - int y = std::max(2+n, 3+m+m); - img_matrix(kalman_ext->Q, 2+n, y); // n - img_matrix(kalman_ext->P_pred, 3+n+n, y); // n - img_matrix(sensor_ext->R, 4+n+n+n, y); // m - img_matrix(kalman_ext->P, img->width - 1 - n, y); // n - } - if (img_legend) { - cvSetImageROI(img_show, cvRect(0, img_legend->height, img->width * img_scale, img->height * img_scale)); - cvResize(img, img_show, CV_INTER_NN); - cvResetImageROI(img_show); - } else { - cvResize(img, img_show, CV_INTER_NN); - } -} - -void KalmanVisualize::show() { - //out_matrix(sensor->K, "K"); - cvShowImage("KalmanVisualize", img_show); -} - -} // namespace alvar +namespace alvar +{ +KalmanSensorCore::KalmanSensorCore(const KalmanSensorCore& k) +{ + m = k.m; + n = k.n; + z = k.z.clone(); + H = k.H.clone(); + 
H_trans = k.H_trans.clone();
+  K = k.K.clone();
+  z_pred = k.z_pred.clone();
+  z_residual = k.z_residual.clone();
+  x_gain = k.x_gain.clone();
+}
+
+KalmanSensorCore::KalmanSensorCore(int _n, int _m)
+{
+  n = _n;
+  m = _m;
+  z = cv::Mat::zeros(m, 1, CV_64FC1);
+  H = cv::Mat::zeros(m, n, CV_64FC1);
+  H_trans = cv::Mat::zeros(n, m, CV_64FC1);
+  K = cv::Mat::zeros(n, m, CV_64FC1);
+  z_pred = cv::Mat::zeros(m, 1, CV_64FC1);
+  z_residual = cv::Mat::zeros(m, 1, CV_64FC1);
+  x_gain = cv::Mat::zeros(n, 1, CV_64FC1);
+}
+
+KalmanSensorCore::~KalmanSensorCore()
+{
+  z.release();
+  H.release();
+  H_trans.release();
+  K.release();
+  z_pred.release();
+  z_residual.release();
+  x_gain.release();
+}
+
+void KalmanSensorCore::update_x(const cv::Mat& x_pred, cv::Mat& x)
+{
+  // x = x_pred + K * (z - H*x_pred)
+  z_pred = H * x_pred;
+  z_residual = z - z_pred;
+  x_gain = K * z_residual;
+  x = x_pred + x_gain;
+}
+
+void KalmanCore::predict_x(unsigned long tick)
+{
+  // x_pred = F * x;
+  x_pred = F * x;
+}
+
+KalmanCore::KalmanCore(const KalmanCore& s)
+{
+  n = s.n;
+  x = s.x.clone();
+  F = s.F.clone();
+  x_pred = s.x_pred.clone();
+  F_trans = s.F_trans.clone();
+}
+
+KalmanCore::KalmanCore(int _n)
+{
+  n = _n;
+  x = cv::Mat::zeros(n, 1, CV_64FC1);
+  F = cv::Mat::eye(n, n, CV_64FC1);
+  F_trans = cv::Mat::eye(n, n, CV_64FC1);
+  x_pred = cv::Mat::zeros(n, 1, CV_64FC1);
+}
+
+KalmanCore::~KalmanCore()
+{
+  x.release();
+  F.release();
+  F_trans.release();
+  x_pred.release();
+}
+
+cv::Mat& KalmanCore::predict()
+{
+  predict_x(0);
+  return x_pred;
+}
+
+cv::Mat& KalmanCore::predict_update(KalmanSensorCore* sensor)
+{
+  predict();
+  sensor->update_x(x_pred, x);
+  return x;
+}
+
+KalmanSensor::KalmanSensor(const KalmanSensor& k) : KalmanSensorCore(k)
+{
+  R = k.R.clone();
+  R_tmp = k.R_tmp.clone();
+  P_tmp = k.P_tmp.clone();
+}
+
+KalmanSensor::KalmanSensor(int n, int _m) : KalmanSensorCore(n, _m)
+{
+  R = cv::Mat::zeros(m, m, CV_64FC1);
+  R_tmp = cv::Mat::zeros(m, m, CV_64FC1);
+  P_tmp = cv::Mat::zeros(n, n, CV_64FC1);
+}
+
+KalmanSensor::~KalmanSensor()
+{
+  R.release();
+  R_tmp.release();
+  P_tmp.release();
+}
+
+void KalmanSensor::update_K(const cv::Mat& P_pred)
+{
+  // K = P * trans(H) * inv(H*P*trans(H) + R)
+  H_trans = H.t();
+  K = P_pred * H_trans;
+  R_tmp = H * K;
+  R_tmp = R_tmp + R;
+  R_tmp = R_tmp.inv();
+  K = H_trans * R_tmp;
+  K = P_pred * K;
+}
+
+void KalmanSensor::update_P(const cv::Mat& P_pred, cv::Mat& P)
+{
+  // P = (I - K*H) * P_pred
+  P_tmp = K * H;
+  cv::setIdentity(P);
+  P = P - P_tmp;
+  P = P * P_pred;
+}
+
+void Kalman::predict_P()
+{
+  // P_pred = F*P*trans(F) + Q
+  F_trans = F.t();
+  P_pred = P * F_trans;
+  P_pred = F * P_pred;
+  P_pred = P_pred + Q;
+}
+
+Kalman::Kalman(int _n) : KalmanCore(_n)
+{
+  prev_tick = 0;
+  Q = cv::Mat::zeros(n, n, CV_64FC1);
+  P = cv::Mat::zeros(n, n, CV_64FC1);
+  P_pred = cv::Mat::zeros(n, n, CV_64FC1);
+}
+
+Kalman::~Kalman()
+{
+  Q.release();
+  P.release();
+  P_pred.release();
+}
+
+void Kalman::update_F(unsigned long tick)
+{
+  // cvSetIdentity(F);
+}
+
+cv::Mat& Kalman::predict(unsigned long tick)
+{
+  update_F(tick);
+  predict_x(tick);
+  predict_P();
+  return x_pred;
+}
+
+cv::Mat& Kalman::predict_update(KalmanSensor* sensor, unsigned long tick)
+{
+  predict(tick);
+  sensor->update_H(x_pred);
+  sensor->update_K(P_pred);
+  sensor->update_x(x_pred, x);
+  sensor->update_P(P_pred, P);
+  prev_tick = tick;
+  return x;
+}
+
+double Kalman::seconds_since_update(unsigned long tick)
+{
+  unsigned long tick_diff = (prev_tick ? tick - prev_tick : 0);
+  return ((double)tick_diff / 1000.0);
+}
+
+void KalmanSensorEkf::update_H(const cv::Mat& x_pred)
+{
+  // By default we update the H by calculating Jacobian numerically
+  const double step = 0.000001;
+  H.setTo(cv::Scalar::all(0));
+  for (int i = 0; i < n; i++)
+  {
+    cv::Mat H_column = H.col(i);
+
+    delta.setTo(cv::Scalar::all(0));
+    delta.at<double>(i, 0) = step;
+    x_plus = x_pred + delta;
+    delta.at<double>(i, 0) = -step;
+    x_minus = x_pred + delta;
+
+    h(x_plus, z_tmp1);
+    h(x_minus, z_tmp2);
+    // copyTo writes through to H; plain assignment would only rebind the
+    // column header and leave H untouched.
+    cv::Mat column = (z_tmp1 - z_tmp2) * (1.0 / (2 * step));
+    column.copyTo(H_column);
+  }
+}
+
+void KalmanSensorEkf::update_x(const cv::Mat& x_pred, cv::Mat& x)
+{
+  // x = x_pred + K * (z - h(x_pred))
+  h(x_pred, z_pred);
+  z_residual = z - z_pred;
+  x_gain = K * z_residual;
+  x = x_pred + x_gain;
+}
+
+KalmanSensorEkf::KalmanSensorEkf(const KalmanSensorEkf& k) : KalmanSensor(k)
+{
+  delta = k.delta.clone();
+  x_plus = k.x_plus.clone();
+  x_minus = k.x_minus.clone();
+  z_tmp1 = k.z_tmp1.clone();
+  z_tmp2 = k.z_tmp2.clone();
+}
+
+KalmanSensorEkf::KalmanSensorEkf(int _n, int _m) : KalmanSensor(_n, _m)
+{
+  delta = cv::Mat::zeros(n, 1, CV_64FC1);
+  x_plus = cv::Mat::zeros(n, 1, CV_64FC1);
+  x_minus = cv::Mat::zeros(n, 1, CV_64FC1);
+  z_tmp1 = cv::Mat::zeros(m, 1, CV_64FC1);
+  z_tmp2 = cv::Mat::zeros(m, 1, CV_64FC1);
+}
+
+KalmanSensorEkf::~KalmanSensorEkf()
+{
+  delta.release();
+  x_plus.release();
+  x_minus.release();
+  z_tmp1.release();
+  z_tmp2.release();
+}
+
+void KalmanEkf::update_F(unsigned long tick)
+{
+  // By default we update the F by calculating Jacobian numerically
+  // TODO
+  double dt = (tick - prev_tick) / 1000.0;
+  const double step = 0.000001;
+  F.setTo(cv::Scalar::all(0));
+  for (int i = 0; i < n; i++)
+  {
+    cv::Mat F_column = F.col(i);
+
+    delta.setTo(cv::Scalar::all(0));
+    delta.at<double>(i, 0) = step;
+    x_plus = x + delta;
+    delta.at<double>(i, 0) = -step;
+    x_minus = x + delta;
+
+    f(x_plus, x_tmp1, dt);
+    f(x_minus, x_tmp2, dt);
+    cv::Mat column = (x_tmp1 - x_tmp2) * (1.0 / (2 * step));
+    column.copyTo(F_column);
+  }
+}
+
+void KalmanEkf::predict_x(unsigned long tick)
+{
+  double dt = (tick - prev_tick) / 1000.0;
+  f(x, x_pred, dt);
+}
+
+KalmanEkf::KalmanEkf(int _n) : Kalman(_n)
+{
+  delta = cv::Mat::zeros(n, 1, CV_64FC1);
+  x_plus = cv::Mat::zeros(n, 1, CV_64FC1);
+  x_minus = cv::Mat::zeros(n, 1, CV_64FC1);
+  x_tmp1 = cv::Mat::zeros(n, 1, CV_64FC1);
+  x_tmp2 = cv::Mat::zeros(n, 1, CV_64FC1);
+}
+
+KalmanEkf::~KalmanEkf()
+{
+  delta.release();
+  x_plus.release();
+  x_minus.release();
+  x_tmp1.release();
+  x_tmp2.release();
+}
+
+void KalmanVisualize::img_matrix(const cv::Mat& mat, int top, int left)
+{
+  int roi_t = -top;
+  int roi_b = -(img.rows - mat.rows - top);
+  int roi_l = -left;
+  int roi_r = -(img.cols - mat.cols - left);
+  img.adjustROI(roi_t, roi_b, roi_l, roi_r);
+  for (int j = 0; j < mat.rows; j++)
+  {
+    for (int i = 0; i < mat.cols; i++)
+    {
+      double d = mat.at<double>(j, i);
+      if (d < 0)
+        d = -d;
+      double c1 = 0, c2 = 0, c3 = 0;
+      if (d < 0.1)
+      {
+        c1 = 0 + ((d - 0.0) / (0.1 - 0.0) * (127 - 0));
+      }
+      else if (d < 1.0)
+      {
+        c1 = 127 + ((d - 0.1) / (1.0 - 0.1) * (255 - 127));
+      }
+      else if (d < 10.0)
+      {
+        c1 = 255;
+        c2 = 0 + ((d - 1.0) / (10.0 - 1.0) * (255 - 0));
+      }
+      else if (d < 100.0)
+      {
+        c1 = 255;
+        c2 = 255;
+        c3 = 0 + ((d - 10.0) / (100.0 - 10.0) * (255 - 0));
+      }
+      else
+      {
+        c1 = 255;
+        c2 = 255;
+        c3 = 255;
+      }
+      if (d < 0)
+      {  // BRG
+        img.at<cv::Vec3b>(j, i) = cv::Vec3b(c3, c2, c1);
+      }
+      else
+      {  // BGR
+        img.at<cv::Vec3b>(j, i) = cv::Vec3b(c2, c1, c3);
+      }
+    }
+  }
+  img.adjustROI(-roi_t, -roi_b, -roi_l, -roi_r);
+}
+
+void KalmanVisualize::Init()
+{
+  n = kalman->get_n();
+  m = sensor->get_m();
+  int img_width = std::max(3 + n + 3 + n + 5 + m + 5,
+                           1 + n + 1 + n + 1 + n + 1 + m + 1 + n + 1);
+  int img_height = 1 + n + 1 + std::max(n, m + 1 + m) + 1;
+  img =
+      cv::Mat(cv::Size(img_width, img_height), CV_8UC3, cv::Scalar(64, 64, 64));
+  img_legend = cv::imread("Legend.png");
+  if (!img_legend.empty())
+  {
+    for (img_scale = 1; img_scale < 50; img_scale++)
+    {
+      if (img_scale * img_width > img_legend.cols)
+      {
+        break;
+      }
+    }
+    img_show = cv::Mat(cv::Size(img_width * img_scale,
+                                img_legend.rows + img_height * img_scale),
+                       CV_8UC3, cv::Scalar(64, 64, 64));
+    int roi_bot = img_show.rows - img_legend.rows;
+    int roi_right = img_show.cols - img_legend.cols;
+    img_show.adjustROI(0, -roi_bot, 0, -roi_right);
+    img_legend.copyTo(img_show);
+    img_show.adjustROI(0, roi_bot, 0, roi_right);
+    cv::namedWindow("KalmanVisualize");
+  }
+  else
+  {
+    img_scale = 1;
+    img_show = cv::Mat(cv::Size(img_width * img_scale, img_height * img_scale),
+                       CV_8UC3, cv::Scalar(64, 64, 64));
+    cv::namedWindow("KalmanVisualize", 0);
+  }
+}
+
+void KalmanVisualize::out_matrix(const cv::Mat& m, char* name)
+{
+  if (m.cols == 1)
+  {
+    std::cout << name << " = [";
+    for (int j = 0; j < m.rows; j++)
+    {
+      std::cout << " " << m.at<double>(j, 0);
+    }
+    std::cout << "]^T" << std::endl;
+  }
+  else if (m.rows == 1)
+  {
+    std::cout << name << " = [";
+    for (int i = 0; i < m.cols; i++)
+    {
+      std::cout << " " << m.at<double>(0, i);
+    }
+    std::cout << "]" << std::endl;
+  }
+  else
+  {
+    std::cout << name << " = [" << std::endl;
+    for (int j = 0; j < m.rows; j++)
+    {
+      for (int i = 0; i < m.cols; i++)
+      {
+        std::cout << " " << m.at<double>(j, i);
+      }
+      std::cout << std::endl;
+    }
+    std::cout << "]" << std::endl;
+  }
+}
+
+KalmanVisualize::KalmanVisualize(Kalman* _kalman, KalmanSensor* _sensor)
+{
+  kalman = _kalman;
+  sensor = _sensor;
+  kalman_ext = _kalman;
+  sensor_ext = _sensor;
+  Init();
+}
+
+KalmanVisualize::KalmanVisualize(KalmanCore* _kalman, KalmanSensorCore* _sensor)
+{
+  kalman = _kalman;
+  sensor = _sensor;
+  kalman_ext = NULL;
+  sensor_ext = NULL;
+  Init();
+}
+
+KalmanVisualize::~KalmanVisualize()
+{
+  img.release();
+}
+
+void KalmanVisualize::update_pre()
+{
+  img_matrix(kalman->x, 1, 1);  // 1
+  if (kalman_ext && sensor_ext)
+  {
+    int y = std::max(2 + n, 3 + m + m);
+    img_matrix(kalman_ext->P, 1, y);  // n
+  }
+}
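For reference while reviewing the cv::Mat port above: update_K, update_x, and update_P compute, step by step, the standard discrete Kalman measurement update, with the residual taken against the predicted measurement `z_pred = H * x_pred`. A compact, self-contained sketch of the same equations in matrix-expression form (illustrative dimensions only, not the package API):

```
#include <opencv2/core.hpp>
#include <iostream>

// K = P H^T (H P H^T + R)^-1
// x = x_pred + K (z - H x_pred)
// P = (I - K H) P_pred
int main()
{
  // 2-state (position, velocity), 1-d position measurement.
  cv::Mat H = (cv::Mat_<double>(1, 2) << 1, 0);
  cv::Mat P = cv::Mat::eye(2, 2, CV_64F);
  cv::Mat R = (cv::Mat_<double>(1, 1) << 0.5);
  cv::Mat x_pred = (cv::Mat_<double>(2, 1) << 0.0, 1.0);
  cv::Mat z = (cv::Mat_<double>(1, 1) << 0.3);

  cv::Mat K = P * H.t() * (H * P * H.t() + R).inv();
  cv::Mat x = x_pred + K * (z - H * x_pred);
  cv::Mat P_post = (cv::Mat::eye(2, 2, CV_64F) - K * H) * P;

  cv::Mat xt = x.t();
  std::cout << "x = " << xt << "\nP = " << P_post << std::endl;
}
```

The EKF variants differ only in replacing `H * x` and `F * x` with the nonlinear `h()` and `f()` and rebuilding the Jacobians numerically by central differences, as update_H/update_F above do.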
+void KalmanVisualize::update_post()
+{
+  img_matrix(kalman->F, 3, 1);                    // n
+  img_matrix(kalman->x_pred, 4 + n, 1);           // 1
+  img_matrix(sensor->H, 6 + n, 1);                // n
+  img_matrix(sensor->z_pred, 7 + n + n, 1);       // 1
+  img_matrix(sensor->z, 7 + n + n, 2 + m);
+  img_matrix(sensor->z_residual, 9 + n + n, 1);   // 1
+  img_matrix(sensor->K, 11 + n + n, 1);           // m
+  img_matrix(sensor->x_gain, 12 + n + n + m, 1);  // 1
+  img_matrix(kalman->x, 14 + n + n + m, 1);       // 1
+  if (kalman_ext && sensor_ext)
+  {
+    int y = std::max(2 + n, 3 + m + m);
+    img_matrix(kalman_ext->Q, 2 + n, y);             // n
+    img_matrix(kalman_ext->P_pred, 3 + n + n, y);    // n
+    img_matrix(sensor_ext->R, 4 + n + n + n, y);     // m
+    img_matrix(kalman_ext->P, img.cols - 1 - n, y);  // n
+  }
+  if (!img_legend.empty())
+  {
+    int roi_t = -img_legend.rows;
+    int roi_b = -img_show.rows + img.rows * img_scale + img_legend.rows;
+    int roi_l = 0;
+    int roi_r = -img_show.cols + img.cols * img_scale;
+    img_show.adjustROI(roi_t, roi_b, roi_l, roi_r);
+    cv::resize(img, img_show, img_show.size(), 0, 0, cv::INTER_NEAREST);
+    img_show.adjustROI(-roi_t, -roi_b, -roi_l, -roi_r);
+  }
+  else
+  {
+    cv::resize(img, img_show, img_show.size(), 0, 0, cv::INTER_NEAREST);
+  }
+}
+
+void KalmanVisualize::show()
+{
+  // out_matrix(sensor->K, "K");
+  cv::imshow("KalmanVisualize", img_show);
+}
+
+}  // namespace alvar
diff --git a/ar_track_alvar/src/Line.cpp b/ar_track_alvar/src/Line.cpp
index 5d3be21..6697ed6 100644
--- a/ar_track_alvar/src/Line.cpp
+++ b/ar_track_alvar/src/Line.cpp
@@ -22,101 +22,99 @@
 */
 #include "ar_track_alvar/Line.h"
+#include <opencv2/imgproc/imgproc.hpp>
 
 using namespace std;
 
-namespace alvar {
+namespace alvar
+{
 using namespace std;
 
-Line::Line(float params[4])
+Line::Line(const cv::Vec4f& params)
 {
-  c.x = params[2];
-  c.y = params[3];
-  s.x = params[0];
-  s.y = params[1];
+  c.x = params[2];
+  c.y = params[3];
+  s.x = params[0];
+  s.y = params[1];
 }
 
 void FitLines(vector<Line>& lines)
 {
-
 }
 
-int FitLines(vector<Line> &lines,
-             const vector<int>& corners,
-             const vector<PointDouble>& edge,
-             IplImage *grey /*=0*/) // grey image for future sub pixel accuracy
+int FitLines(vector<Line>& lines, const vector<int>& corners,
+             const vector<PointDouble>& edge)
 {
-  lines.clear();
-  for(unsigned j = 0; j < corners.size(); ++j)
-  {
-    int start, end, first;
-    int size = (int)edge.size();
-
-    first = corners[0];
-    start = corners[j];
-
-    if(j < corners.size()-1)
-      end = corners[j+1];
-    else
-      end = first;
-
-    int len = 0;
-
-    if(start < end)
-      len = end-start+1;
-    else
-      len = size-start+end+1;
-
-    int ind;
-    double* data = new double[2*len];
-
-    // OpenCV routine...
-    CvMat* line_data = cvCreateMat(1, len, CV_32FC2);
-    for(int i = 0; i < len; ++i)
-    {
-      ind = i + start;
-      if(ind >= size)
-        ind = ind-size;
-
-      double px = double(edge[ind].x);
-      double py = double(edge[ind].y);
-      CV_MAT_ELEM(*line_data, CvPoint2D32f, 0, i) = cvPoint2D32f(px, py);
-    }
-
-    float params[4] = {0};
-    cvFitLine(line_data, CV_DIST_L2, 0, 0.01, 0.01, params);
-    lines.push_back(Line(params));
-
-    delete [] data;
-    cvReleaseMat(&line_data);
-
-  }
-
-  return lines.size();
+  lines.clear();
+  for (unsigned j = 0; j < corners.size(); ++j)
+  {
+    int start, end, first;
+    int size = (int)edge.size();
+
+    first = corners[0];
+    start = corners[j];
+
+    if (j < corners.size() - 1)
+      end = corners[j + 1];
+    else
+      end = first;
+
+    int len = 0;
+
+    if (start < end)
+      len = end - start + 1;
+    else
+      len = size - start + end + 1;
+
+    int ind;
+    double* data = new double[2 * len];
+
+    // OpenCV routine...
+    cv::Mat line_data = cv::Mat(1, len, CV_32FC2);
+    for (int i = 0; i < len; ++i)
+    {
+      ind = i + start;
+      if (ind >= size)
+        ind = ind - size;
+
+      double px = double(edge[ind].x);
+      double py = double(edge[ind].y);
+      line_data.at<cv::Point2f>(0, i) = cv::Point2f((float)px, (float)py);
+    }
+
+    cv::Vec4f line;
+    cv::fitLine(line_data, line, cv::DIST_L2, 0, 0.01, 0.01);
+    lines.push_back(Line(line));
+
+    delete[] data;
+    line_data.release();
+  }
+
+  return lines.size();
 }
 
 PointDouble Intersection(const Line& l1, const Line& l2)
 {
-
-  double vx = l1.s.x;
-  double vy = l1.s.y;
-  double ux = l2.s.x;
-  double uy = l2.s.y;
-  double wx = l2.c.x-l1.c.x;
-  double wy = l2.c.y-l1.c.y;
-
-  double s, px, py;
-  double tmp = vx*uy-vy*ux;
-  if(tmp==0) tmp = 1;
-
-  //if(/*tmp <= 1.f && tmp >= -1.f && */tmp != 0.f && ang > 0.1)
-  {
-    s = (vy*wx-vx*wy) / (tmp);
-    px = l2.c.x+s*ux;
-    py = l2.c.y+s*uy;
-  }
-
-  return PointDouble(px, py);
+  double vx = l1.s.x;
+  double vy = l1.s.y;
+  double ux = l2.s.x;
+  double uy = l2.s.y;
+  double wx = l2.c.x - l1.c.x;
+  double wy = l2.c.y - l1.c.y;
+
+  double s, px, py;
+  double tmp = vx * uy - vy * ux;
+  if (tmp == 0)
+    tmp = 1;
+
+  // if(/*tmp <= 1.f && tmp >= -1.f && */tmp != 0.f && ang > 0.1)
+  {
+    s = (vy * wx - vx * wy) / (tmp);
+    px = l2.c.x + s * ux;
+    py = l2.c.y + s * uy;
+  }
+
+  return PointDouble(px, py);
 }
 
-} // namespace alvar
+}  // namespace alvar
diff --git a/ar_track_alvar/src/Marker.cpp b/ar_track_alvar/src/Marker.cpp
index fdaf503..4d67ec6 100644
--- a/ar_track_alvar/src/Marker.cpp
+++ b/ar_track_alvar/src/Marker.cpp
@@ -23,7 +23,7 @@
 #include "ar_track_alvar/Alvar.h"
 #include "ar_track_alvar/Marker.h"
-#include "highgui.h"
+#include <opencv2/highgui/highgui.hpp>
 
 template class ALVAR_EXPORT alvar::MarkerIteratorImpl<alvar::Marker>;
 template class ALVAR_EXPORT alvar::MarkerIteratorImpl<alvar::MarkerData>;
@@ -31,1039 +31,1457 @@ template class ALVAR_EXPORT alvar::MarkerIteratorImpl<alvar::MarkerArtoolkit>;
 using namespace std;
 
-namespace alvar {
+namespace alvar
+{
 using namespace std;
 
 #define HEADER_SIZE 8
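Intersection() in the Line.cpp hunk above solves the two parametric lines p = c1 + t*v and q = c2 + s*u for s, dividing by the 2-D cross product v x u. The same computation in isolation (hypothetical `Vec2`/`intersect` names; unlike the original, this version reports parallel lines instead of substituting 1 for the zero denominator):

```
#include <cstdio>

struct Vec2
{
  double x, y;
};

// Intersect p = c1 + t*v with q = c2 + s*u. Returns false when parallel.
bool intersect(Vec2 c1, Vec2 v, Vec2 c2, Vec2 u, Vec2& out)
{
  double denom = v.x * u.y - v.y * u.x;  // 2-d cross product v x u
  if (denom == 0.0)
    return false;
  double wx = c2.x - c1.x;
  double wy = c2.y - c1.y;
  double s = (v.y * wx - v.x * wy) / denom;
  out = { c2.x + s * u.x, c2.y + s * u.y };
  return true;
}

int main()
{
  Vec2 p;
  // x axis vs. the vertical line x = 1: expect (1, 0).
  if (intersect({ 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, p))
    std::printf("(%g, %g)\n", p.x, p.y);
}
```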
(int)visualize2d_points[(i + 1) % 4][1]),
+        color);
+    cv::line(
+        image,
+        cv::Point((int)visualize2d_points[i][0], (int)visualize2d_points[i][1]),
+        cv::Point((int)visualize2d_points[4 + i][0],
+                  (int)visualize2d_points[4 + i][1]),
+        color);
+    cv::line(image,
+             cv::Point((int)visualize2d_points[4 + i][0],
+                       (int)visualize2d_points[4 + i][1]),
+             cv::Point((int)visualize2d_points[4 + ((i + 1) % 4)][0],
+                       (int)visualize2d_points[4 + ((i + 1) % 4)][1]),
+             color);
+  }
+  // Coordinates
+  cv::line(
+      image,
+      cv::Point((int)visualize2d_points[8][0], (int)visualize2d_points[8][1]),
+      cv::Point((int)visualize2d_points[9][0], (int)visualize2d_points[9][1]),
+      CV_RGB(255, 0, 0));
+  cv::line(
+      image,
+      cv::Point((int)visualize2d_points[8][0], (int)visualize2d_points[8][1]),
+      cv::Point((int)visualize2d_points[10][0], (int)visualize2d_points[10][1]),
+      CV_RGB(0, 255, 0));
+  cv::line(
+      image,
+      cv::Point((int)visualize2d_points[8][0], (int)visualize2d_points[8][1]),
+      cv::Point((int)visualize2d_points[11][0], (int)visualize2d_points[11][1]),
+      CV_RGB(0, 0, 255));
 }
 
-void Marker::VisualizeMarkerContent(IplImage *image, Camera *cam, double datatext_point[2], double content_point[2]) const {
+void Marker::VisualizeMarkerContent(cv::Mat& image, Camera* cam,
+                                    double datatext_point[2],
+                                    double content_point[2]) const
+{
 #ifdef VISUALIZE_MARKER_POINTS
-  for (size_t i=0; i= 0) && (x < image->width) &&
-        (y >= 0) && (y < image->height))
-      {
-        if (cvGet2D(marker_content, j/3, i/3).val[0]) {
-          cvSet2D(image, y, x, CV_RGB(255,255,255));
-        } else {
-          cvSet2D(image, y, x, CV_RGB(0,0,0));
-        }
-      }
-    }
-  }
+  // Marker data
+  std::stringstream val;
+  val << int(GetId());
+  cv::putText(image, val.str(),
+              cv::Point((int)datatext_point[0], (int)datatext_point[1]), 0, 0.5,
+              CV_RGB(255, 255, 0));
+
+  // MarkerContent
+  int xc = int(content_point[0]);
+  int yc = int(content_point[1]);
+  for (int j = 0; j < res * 3; j++)
+  {
+    for (int i = 0; i < res * 3; i++)
+    {
+      int x = xc + i;
+      int y = yc + j;
+      if ((x >= 0) && (x < image.cols) && (y >= 0) && (y < image.rows))
+      {
+        if (marker_content.at<uchar>(j / 3, i / 3))
+        {
+          image.at<cv::Vec3b>(y, x) = cv::Vec3b(255, 255, 255);
+        }
+        else
+        {
+          image.at<cv::Vec3b>(y, x) = cv::Vec3b(0, 0, 0);
+        }
+      }
+    }
+  }
 }
 
-void Marker::VisualizeMarkerError(IplImage *image, Camera *cam, double errortext_point[2]) const {
-  CvFont font;
-  cvInitFont(&font, 0, 0.5, 0.5, 0);
-  std::stringstream val;
-  if (GetError(MARGIN_ERROR|DECODE_ERROR) > 0) {
-    val.str("");
-    val< 0.01) {
-    val.str("");
-    val<
+void Marker::VisualizeMarkerError(cv::Mat& image, Camera* cam,
+                                  double errortext_point[2]) const
+{
+  std::stringstream val;
+  if (GetError(MARGIN_ERROR | DECODE_ERROR) > 0)
+  {
+    val.str("");
+    val << int(GetError(MARGIN_ERROR) * 100) << "% ";
+    val << int(GetError(DECODE_ERROR) * 100) << "% ";
+    cv::putText(image, val.str(),
+                cv::Point((int)errortext_point[0], (int)errortext_point[1]), 0,
+                0.5, CV_RGB(255, 0, 0));
+  }
+  else if (GetError(TRACK_ERROR) > 0.01)
+  {
+    val.str("");
+    val << int(GetError(TRACK_ERROR) * 100) << "%";
+    cv::putText(image, val.str(),
+                cv::Point((int)errortext_point[0], (int)errortext_point[1]), 0,
+                0.5, CV_RGB(128, 0, 0));
+  }
 }
 
-void MarkerData::VisualizeMarkerContent(IplImage *image, Camera *cam, double datatext_point[2], double content_point[2]) const {
+void MarkerData::VisualizeMarkerContent(cv::Mat& image, Camera* cam,
+                                        double datatext_point[2],
+                                        double content_point[2]) const
+{
 #ifdef VISUALIZE_MARKER_POINTS
-  for (size_t i=0; iProjectPoints(&visualize3d_points_mat, &pose, &visualize2d_points_mat);
-
-  VisualizeMarkerPose(image, cam, visualize2d_points, color);
-  VisualizeMarkerContent(image, cam, visualize2d_points[0], visualize2d_points[8]);
-  VisualizeMarkerError(image, cam, visualize2d_points[2]);
-}
+void Marker::Visualize(cv::Mat& image, Camera* cam,
+                       const cv::Scalar& color) const
+{
+  double visualize3d_points[12][3] = {
+    // cube
+    { -(edge_length / 2), -(edge_length / 2), 0 },
+    { -(edge_length / 2), (edge_length / 2), 0 },
+    { (edge_length / 2), (edge_length / 2), 0 },
+    { (edge_length / 2), -(edge_length / 2), 0 },
+    { -(edge_length / 2), -(edge_length / 2), edge_length },
+    { -(edge_length / 2), (edge_length / 2), edge_length },
+    { (edge_length / 2), (edge_length / 2), edge_length },
+    { (edge_length / 2), -(edge_length / 2), edge_length },
+    // coordinates
+    { 0, 0, 0 },
+    { edge_length, 0, 0 },
+    { 0, edge_length, 0 },
+    { 0, 0, edge_length },
+  };
+  double visualize2d_points[12][2];
+  cv::Mat visualize3d_points_mat;
+  cv::Mat visualize2d_points_mat;
+  visualize3d_points_mat = cv::Mat(12, 3, CV_64F, visualize3d_points);
+  visualize2d_points_mat = cv::Mat(12, 2, CV_64F, visualize2d_points);
+  cam->ProjectPoints(visualize3d_points_mat, &pose, visualize2d_points_mat);
+
+  VisualizeMarkerPose(image, cam, visualize2d_points, color);
+  VisualizeMarkerContent(image, cam, visualize2d_points[0],
+                         visualize2d_points[8]);
+  VisualizeMarkerError(image, cam, visualize2d_points[2]);
+}
 
-void Marker::CompareCorners(vector<PointDouble> &_marker_corners_img, int *orientation, double *error) {
-  vector<PointDouble>::iterator corners_new = _marker_corners_img.begin();
-  vector<PointDouble>::const_iterator corners_old = marker_corners_img.begin();
-  vector<double> errors(4);
-  for (int i=0; i<4; i++) {
-    errors[0] += PointSquaredDistance(marker_corners_img[i], _marker_corners_img[i]);
-    errors[1] += PointSquaredDistance(marker_corners_img[i], _marker_corners_img[(i+1)%4]);
-    errors[2] += PointSquaredDistance(marker_corners_img[i], _marker_corners_img[(i+2)%4]);
-    errors[3] += PointSquaredDistance(marker_corners_img[i], _marker_corners_img[(i+3)%4]);
-  }
-  *orientation = min_element(errors.begin(), errors.end()) - errors.begin();
-  *error = sqrt(errors[*orientation]/4);
-  *error /= sqrt(max(PointSquaredDistance(marker_corners_img[0], marker_corners_img[2]), PointSquaredDistance(marker_corners_img[1], marker_corners_img[3])));
+void Marker::CompareCorners(vector<PointDouble>& _marker_corners_img,
+                            int* orientation, double* error)
+{
+  vector<PointDouble>::iterator corners_new = _marker_corners_img.begin();
+  vector<PointDouble>::const_iterator corners_old = marker_corners_img.begin();
+  vector<double> errors(4);
+  for (int i = 0; i < 4; i++)
+  {
+    errors[0] +=
+        PointSquaredDistance(marker_corners_img[i], _marker_corners_img[i]);
+    errors[1] += PointSquaredDistance(marker_corners_img[i],
+                                      _marker_corners_img[(i + 1) % 4]);
+    errors[2] += PointSquaredDistance(marker_corners_img[i],
+                                      _marker_corners_img[(i + 2) % 4]);
+    errors[3] += PointSquaredDistance(marker_corners_img[i],
+                                      _marker_corners_img[(i + 3) % 4]);
+  }
+  *orientation = min_element(errors.begin(), errors.end()) - errors.begin();
+  *error = sqrt(errors[*orientation] / 4);
+  *error /= sqrt(
+      max(PointSquaredDistance(marker_corners_img[0], marker_corners_img[2]),
+          PointSquaredDistance(marker_corners_img[1], marker_corners_img[3])));
 }
 
-void Marker::CompareContent(vector<PointDouble> &_marker_corners_img, IplImage *gray, Camera *cam, int *orientation) const {
-  // TODO: Note, that to use this method you need to straighten the content
-  // TODO: This method can be used with image based trackingt
-
+void Marker::CompareContent(vector<PointDouble>& _marker_corners_img,
+                            cv::Mat& gray, Camera* cam, int* orientation) const
+{
+  // TODO: Note, that to use this method you need to straighten the content
+  // TODO: This method can be used with image based tracking
 }
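CompareCorners above recovers the marker's orientation between frames by scoring all four cyclic orderings of the corners with a summed squared distance and keeping the best, then normalizing the RMS error by the diagonal length. A standalone sketch of the same min-SSD idea (illustrative types and names):

```
#include <array>
#include <cmath>
#include <cstdio>

struct Pt
{
  double x, y;
};

static double sqdist(Pt a, Pt b)
{
  return (a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y);
}

// Returns the cyclic shift of new_c that best matches old_c; err gets the
// RMS corner distance for the winning shift.
int best_orientation(const std::array<Pt, 4>& old_c,
                     const std::array<Pt, 4>& new_c, double& err)
{
  int best = 0;
  double best_e = 1e300;
  for (int shift = 0; shift < 4; ++shift)
  {
    double e = 0;
    for (int i = 0; i < 4; ++i)
      e += sqdist(old_c[i], new_c[(i + shift) % 4]);
    if (e < best_e)
    {
      best_e = e;
      best = shift;
    }
  }
  err = std::sqrt(best_e / 4);
  return best;
}

int main()
{
  std::array<Pt, 4> a = { { { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 } } };
  std::array<Pt, 4> b = { { { 0, 1 }, { 0, 0 }, { 1, 0 }, { 1, 1 } } };
  double err;
  std::printf("shift=%d err=%g\n", best_orientation(a, b, err), err);  // 1 0
}
```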
content + // TODO: This method can be used with image based trackingt } -bool Marker::UpdateContent(vector &_marker_corners_img, IplImage *gray, Camera *cam, int frame_no /*= 0*/) { - return UpdateContentBasic(_marker_corners_img, gray, cam, frame_no); +bool Marker::UpdateContent(vector& _marker_corners_img, + cv::Mat& gray, Camera* cam, int frame_no /*= 0*/) +{ + return UpdateContentBasic(_marker_corners_img, gray, cam, frame_no); } -bool Marker::UpdateContentBasic(vector &_marker_corners_img, IplImage *gray, Camera *cam, int frame_no /*= 0*/) { - vector marker_corners_img_undist; - marker_corners_img_undist.resize(_marker_corners_img.size()); - copy(_marker_corners_img.begin(), _marker_corners_img.end(), marker_corners_img_undist.begin()); - - // Figure out the marker point position in the image - Homography H; - vector marker_points_img(marker_points.size()); - marker_points_img.resize(marker_points.size()); - cam->Undistort(marker_corners_img_undist); - H.Find(marker_corners, marker_corners_img_undist); - H.ProjectPoints(marker_points, marker_points_img); - cam->Distort(marker_points_img); - - ros_marker_points_img.clear(); - - // Read the content - int x, y; - double min = 255.0, max = 0.0; - for (int j=0; jheight; j++) { - for (int i=0; iwidth; i++) { - x = (int)(0.5+Limit(marker_points_img[(j*marker_content->width)+i].x, 1, gray->width-2)); - y = (int)(0.5+Limit(marker_points_img[(j*marker_content->width)+i].y, 1, gray->height-2)); - - marker_points_img[(j*marker_content->width)+i].val = (int)cvGetReal2D(gray, y, x); - - ros_marker_points_img.push_back(PointDouble(x,y)); - - /* - // Use median of 5 neighbor pixels - vector vals; - vals.clear(); - vals.push_back(); - vals.push_back((int)cvGetReal2D(gray, y-1, x)); - vals.push_back((int)cvGetReal2D(gray, y, x-1)); - vals.push_back((int)cvGetReal2D(gray, y+1, x)); - vals.push_back((int)cvGetReal2D(gray, y, x+1)); - nth_element(vals.begin(), vals.begin()+2, vals.end()); - tmp = vals[2]; - */ - - cvSet2D(marker_content, j, i, cvScalar(marker_points_img[(j*marker_content->width)+i].val)); - if(marker_points_img[(j*marker_content->width)+i].val > max) max = marker_points_img[(j*marker_content->width)+i].val; - if(marker_points_img[(j*marker_content->width)+i].val < min) min = marker_points_img[(j*marker_content->width)+i].val; - } - } - - // Take few additional points from border and just - // outside the border to make the right thresholding - vector marker_margin_w_img(marker_margin_w.size()); - vector marker_margin_b_img(marker_margin_b.size()); - H.ProjectPoints(marker_margin_w, marker_margin_w_img); - H.ProjectPoints(marker_margin_b, marker_margin_b_img); - cam->Distort(marker_margin_w_img); - cam->Distort(marker_margin_b_img); - - min = max = 0; // Now min and max values are averages over black and white border pixels. 
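
The reading loop above (and its ported counterpart in the next hunk) fetches one gray pixel per sample point. The C-to-C++ change at each read site is mechanical; a minimal sketch of the equivalence (the function name is illustrative), assuming the input image is 8-bit single-channel (`CV_8UC1`):

```
#include <opencv2/core.hpp>

// Old C API read:   int v = (int)cvGetReal2D(gray_ipl, y, x);
// New C++ API read; note that at<>() takes (row, col), i.e. (y, x), and the
// template argument must match the element type (uchar for CV_8UC1).
int read_pixel(const cv::Mat& gray, int y, int x)
{
  return static_cast<int>(gray.at<uchar>(y, x));
}
```
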
- for (size_t i=0; iwidth-1)); - y = (int)(0.5+Limit(marker_margin_w_img[i].y, 0, gray->height-1)); - marker_margin_w_img[i].val = (int)cvGetReal2D(gray, y, x); - max += marker_margin_w_img[i].val; - //if(marker_margin_w_img[i].val > max) max = marker_margin_w_img[i].val; - //if(marker_margin_w_img[i].val < min) min = marker_margin_w_img[i].val; - } - for (size_t i=0; iwidth-1)); - y = (int)(0.5+Limit(marker_margin_b_img[i].y, 0, gray->height-1)); - marker_margin_b_img[i].val = (int)cvGetReal2D(gray, y, x); - min += marker_margin_b_img[i].val; - //if(marker_margin_b_img[i].val > max) max = marker_margin_b_img[i].val; - //if(marker_margin_b_img[i].val < min) min = marker_margin_b_img[i].val; - ros_marker_points_img.push_back(PointDouble(x,y)); - } - max /= marker_margin_w_img.size(); - min /= marker_margin_b_img.size(); - - // Threshold the marker content - cvThreshold(marker_content, marker_content, (max+min)/2.0, 255, CV_THRESH_BINARY); - - // Count erroneous margin nodes - int erroneous = 0; - int total = 0; - for (size_t i=0; i (max+min)/2.0) erroneous++; - total++; - } - margin_error = (double)erroneous/total; - track_error; +bool Marker::UpdateContentBasic(vector& _marker_corners_img, + cv::Mat& gray, Camera* cam, + int frame_no /*= 0*/) +{ + vector marker_corners_img_undist(_marker_corners_img); + + // Figure out the marker point position in the image + Homography H; + vector marker_points_img(marker_points.size()); + marker_points_img.resize(marker_points.size()); + cam->Undistort(marker_corners_img_undist); + H.Find(marker_corners, marker_corners_img_undist); + H.ProjectPoints(marker_points, marker_points_img); + cam->Distort(marker_points_img); + + ros_marker_points_img.clear(); + + // Read the content + int x, y; + double min = 255.0, max = 0.0; + for (int j = 0; j < marker_content.rows; j++) + { + for (int i = 0; i < marker_content.cols; i++) + { + x = (int)(0.5 + Limit(marker_points_img[(j * marker_content.cols) + i].x, + 1, gray.cols - 2)); + y = (int)(0.5 + Limit(marker_points_img[(j * marker_content.cols) + i].y, + 1, gray.rows - 2)); + + marker_points_img[(j * marker_content.cols) + i].val = + (int)gray.at(y, x); + + ros_marker_points_img.push_back(PointDouble(x, y)); + + /* + // Use median of 5 neighbor pixels + vector vals; + vals.clear(); + vals.push_back(); + vals.push_back((int)cvGetReal2D(gray, y-1, x)); + vals.push_back((int)cvGetReal2D(gray, y, x-1)); + vals.push_back((int)cvGetReal2D(gray, y+1, x)); + vals.push_back((int)cvGetReal2D(gray, y, x+1)); + nth_element(vals.begin(), vals.begin()+2, vals.end()); + tmp = vals[2]; + */ + + marker_content.at(j, i) = + marker_points_img[(j * marker_content.cols) + i].val; + if (marker_points_img[(j * marker_content.cols) + i].val > max) + max = marker_points_img[(j * marker_content.cols) + i].val; + if (marker_points_img[(j * marker_content.cols) + i].val < min) + min = marker_points_img[(j * marker_content.cols) + i].val; + } + } + + // Take few additional points from border and just + // outside the border to make the right thresholding + vector marker_margin_w_img(marker_margin_w.size()); + vector marker_margin_b_img(marker_margin_b.size()); + H.ProjectPoints(marker_margin_w, marker_margin_w_img); + H.ProjectPoints(marker_margin_b, marker_margin_b_img); + cam->Distort(marker_margin_w_img); + cam->Distort(marker_margin_b_img); + + min = max = 0; // Now min and max values are averages over black and white + // border pixels. 
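
The removed block above derives the binarization cutoff from the border samples, and the ported version just below keeps the same logic: `max` becomes the mean of the white-margin reads, `min` the mean of the black-margin reads, the threshold is their midpoint, and `margin_error` is the fraction of border samples that land on the wrong side of it. A condensed, self-contained sketch of that logic (function and parameter names are illustrative):

```
#include <opencv2/imgproc.hpp>
#include <vector>

// 'content' is the res x res CV_8U marker matrix; 'white'/'black' hold the
// intensities sampled just outside and just inside the marker border.
double binarize_and_score(cv::Mat& content, const std::vector<double>& white,
                          const std::vector<double>& black)
{
  double max = 0.0, min = 0.0;
  for (double v : white) max += v;
  for (double v : black) min += v;
  max /= white.size();
  min /= black.size();
  double cutoff = (max + min) / 2.0;

  // cvThreshold(..., CV_THRESH_BINARY) becomes cv::threshold(..., cv::THRESH_BINARY)
  cv::threshold(content, content, cutoff, 255, cv::THRESH_BINARY);

  int erroneous = 0, total = 0;
  for (double v : white) { if (v < cutoff) erroneous++; total++; }
  for (double v : black) { if (v > cutoff) erroneous++; total++; }
  return static_cast<double>(erroneous) / total;  // margin_error
}
```
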
+  for (size_t i = 0; i < marker_margin_w_img.size(); i++)
+  {
+    x = (int)(0.5 + Limit(marker_margin_w_img[i].x, 0, gray.cols - 1));
+    y = (int)(0.5 + Limit(marker_margin_w_img[i].y, 0, gray.rows - 1));
+    marker_margin_w_img[i].val = (int)gray.at<uchar>(y, x);
+    max += marker_margin_w_img[i].val;
+    // if(marker_margin_w_img[i].val > max) max = marker_margin_w_img[i].val;
+    // if(marker_margin_w_img[i].val < min) min = marker_margin_w_img[i].val;
+  }
+  for (size_t i = 0; i < marker_margin_b_img.size(); i++)
+  {
+    x = (int)(0.5 + Limit(marker_margin_b_img[i].x, 0, gray.cols - 1));
+    y = (int)(0.5 + Limit(marker_margin_b_img[i].y, 0, gray.rows - 1));
+    marker_margin_b_img[i].val = (int)gray.at<uchar>(y, x);
+    min += marker_margin_b_img[i].val;
+    // if(marker_margin_b_img[i].val > max) max = marker_margin_b_img[i].val;
+    // if(marker_margin_b_img[i].val < min) min = marker_margin_b_img[i].val;
+    ros_marker_points_img.push_back(PointDouble(x, y));
+  }
+  max /= marker_margin_w_img.size();
+  min /= marker_margin_b_img.size();
+
+  // Threshold the marker content
+  cv::threshold(marker_content, marker_content, (max + min) / 2.0, 255,
+                cv::THRESH_BINARY);
+
+  // Count erroneous margin nodes
+  int erroneous = 0;
+  int total = 0;
+  for (size_t i = 0; i < marker_margin_w_img.size(); i++)
+  {
+    if (marker_margin_w_img[i].val < (max + min) / 2.0)
+      erroneous++;
+    total++;
+  }
+  for (size_t i = 0; i < marker_margin_b_img.size(); i++)
+  {
+    if (marker_margin_b_img[i].val > (max + min) / 2.0)
+      erroneous++;
+    total++;
+  }
+  margin_error = (double)erroneous / total;
+  track_error;
 #ifdef VISUALIZE_MARKER_POINTS
-	// Now we fill also this temporary debug table for visualizing marker code reading
-	// TODO: this whole vector is only for debug purposes
-	marker_allpoints_img.clear();
-	for (size_t i=0; i<marker_margin_w_img.size(); i++) {
-		PointDouble p = marker_margin_w_img[i];
-		if (p.val < (max+min)/2.0) p.val=255; // error
-		else p.val=0; // ok
-		marker_allpoints_img.push_back(p);
-	}
-	for (size_t i=0; i<marker_margin_b_img.size(); i++) {
-		PointDouble p = marker_margin_b_img[i];
-		if (p.val > (max+min)/2.0) p.val=255; // error
-		else p.val=0; // ok
-		marker_allpoints_img.push_back(p);
-	}
-	for (size_t i=0; i<marker_points_img.size(); i++) {
-		PointDouble p = marker_points_img[i];
-		p.val=128; // Unknown?
-		marker_allpoints_img.push_back(p);
-	}
+  // Now we fill also this temporary debug table for visualizing marker code
+  // reading
+  // TODO: this whole vector is only for debug purposes
+  marker_allpoints_img.clear();
+  for (size_t i = 0; i < marker_margin_w_img.size(); i++)
+  {
+    PointDouble p = marker_margin_w_img[i];
+    if (p.val < (max + min) / 2.0)
+      p.val = 255;  // error
+    else
+      p.val = 0;  // ok
+    marker_allpoints_img.push_back(p);
+  }
+  for (size_t i = 0; i < marker_margin_b_img.size(); i++)
+  {
+    PointDouble p = marker_margin_b_img[i];
+    if (p.val > (max + min) / 2.0)
+      p.val = 255;  // error
+    else
+      p.val = 0;  // ok
+    marker_allpoints_img.push_back(p);
+  }
+  for (size_t i = 0; i < marker_points_img.size(); i++)
+  {
+    PointDouble p = marker_points_img[i];
+    p.val = 128;  // Unknown?
+ marker_allpoints_img.push_back(p); + } #endif - return true; + return true; } -void Marker::UpdatePose(vector &_marker_corners_img, Camera *cam, int orientation, int frame_no /* =0 */, bool update_pose /* =true */) { - marker_corners_img.resize(_marker_corners_img.size()); - copy(_marker_corners_img.begin(), _marker_corners_img.end(), marker_corners_img.begin()); - - // Calculate exterior orientation - if(orientation > 0) - std::rotate(marker_corners_img.begin(), marker_corners_img.begin() + orientation, marker_corners_img.end()); - - if (update_pose) cam->CalcExteriorOrientation(marker_corners, marker_corners_img, &pose); +void Marker::UpdatePose(vector& _marker_corners_img, Camera* cam, + int orientation, int frame_no /* =0 */, + bool update_pose /* =true */) +{ + marker_corners_img = _marker_corners_img; + + // Calculate exterior orientation + if (orientation > 0) + { + std::rotate(marker_corners_img.begin(), + marker_corners_img.begin() + orientation, + marker_corners_img.end()); + } + + if (update_pose) + { + cam->CalcExteriorOrientation(marker_corners, marker_corners_img, &pose); + } } -bool Marker::DecodeContent(int *orientation) { - *orientation = 0; - decode_error = 0; - return true; +bool Marker::DecodeContent(int* orientation) +{ + *orientation = 0; + decode_error = 0; + return true; } -void Marker::SaveMarkerImage(const char *filename, int save_res) const { - double scale; - if (save_res == 0) { - // TODO: More intelligent deduction of a minimum save_res - save_res = int((res+margin+margin)*12); - } - scale = double(save_res)/double(res+margin+margin); - - IplImage *img = cvCreateImage(cvSize(save_res, save_res), IPL_DEPTH_8U, 1); - IplImage *img_content = cvCreateImage(cvSize(int(res*scale+0.5), int(res*scale+0.5)), IPL_DEPTH_8U, 1); - cvZero(img); - CvMat submat; - cvGetSubRect(img, &submat, cvRect(int(margin*scale), int(margin*scale), int(res*scale), int(res*scale))); - cvResize(marker_content, img_content, CV_INTER_NN); - cvCopy(img_content, &submat); - cvSaveImage(filename, img); - cvReleaseImage(&img_content); - cvReleaseImage(&img); +void Marker::SaveMarkerImage(const char* filename, int save_res) const +{ + double scale; + if (save_res == 0) + { + // TODO: More intelligent deduction of a minimum save_res + save_res = int((res + margin + margin) * 12); + } + scale = double(save_res) / double(res + margin + margin); + + cv::Mat img = cv::Mat::zeros(save_res, save_res, CV_8UC1); + cv::Mat img_content = cv::Mat( + cv::Size(int(res * scale + 0.5), int(res * scale + 0.5)), CV_8UC1); + cv::Mat submat = img(cv::Rect(int(margin * scale), int(margin * scale), + int(res * scale), int(res * scale))); + cv::resize(marker_content, img_content, img_content.size(), 0, 0, + cv::INTER_NEAREST); + img_content.copyTo(submat); + cv::imwrite(filename, img); + img_content.release(); + img.release(); } -void Marker::ScaleMarkerToImage(IplImage *image) const { - const int multiplier=96; - IplImage *img = cvCreateImage(cvSize(int(multiplier*(res+margin+margin)+0.5), int(multiplier*(res+margin+margin)+0.5)), IPL_DEPTH_8U, 1); - IplImage *img_content = cvCreateImage(cvSize(int(multiplier*res+0.5), int(multiplier*res+0.5)), IPL_DEPTH_8U, 1); - cvZero(img); - CvMat submat; - cvGetSubRect(img, &submat, cvRect(int(multiplier*margin+0.5), int(multiplier*margin+0.5), int(multiplier*res+0.5), int(multiplier*res+0.5))); - cvResize(marker_content, img_content, CV_INTER_NN); - cvCopy(img_content, &submat); - cvResize(img, image, CV_INTER_NN); - cvReleaseImage(&img_content); - cvReleaseImage(&img); +void 
Marker::ScaleMarkerToImage(cv::Mat& image) const +{ + const int multiplier = 96; + cv::Mat img = + cv::Mat::zeros(cv::Size(int(multiplier * (res + margin + margin) + 0.5), + int(multiplier * (res + margin + margin) + 0.5)), + CV_8UC1); + cv::Mat img_content = cv::Mat( + cv::Size(int(multiplier * res + 0.5), int(multiplier * res + 0.5)), + CV_8UC1); + cv::Mat submat = img( + cv::Rect(int(multiplier * margin + 0.5), int(multiplier * margin + 0.5), + int(multiplier * res + 0.5), int(multiplier * res + 0.5))); + cv::resize(marker_content, img_content, img_content.size(), 0, 0, + cv::INTER_NEAREST); + img_content.copyTo(submat); + cv::resize(img, image, image.size(), 0, 0, cv::INTER_NEAREST); + img_content.release(); + img.release(); } -void Marker::SetMarkerSize(double _edge_length, int _res, double _margin) { - // TODO: Is this right place for default marker size? - edge_length = (_edge_length?_edge_length:1); - res = _res; //(_res?_res:10); - margin = (_margin?_margin:1); - double x_min = -0.5*edge_length; - double y_min = -0.5*edge_length; - double x_max = 0.5*edge_length; - double y_max = 0.5*edge_length; - double cx_min = (x_min * res)/(res + margin + margin); - double cy_min = (y_min * res)/(res + margin + margin); - double cx_max = (x_max * res)/(res + margin + margin); - double cy_max = (y_max * res)/(res + margin + margin); - double step = edge_length / (res + margin + margin); - - // marker_corners - marker_corners_img.resize(4); - - // Same order as the detected corners - marker_corners.clear(); - marker_corners.push_back(PointDouble(x_min, y_min)); - marker_corners.push_back(PointDouble(x_max, y_min)); - marker_corners.push_back(PointDouble(x_max, y_max)); - marker_corners.push_back(PointDouble(x_min, y_max)); - - // Rest can be done only if we have existing resolution - if (res <= 0) return; - - // marker_points - marker_points.clear(); - for(int j = 0; j < res; ++j) { - for(int i = 0; i < res; ++i) { - PointDouble pt; - pt.y = cy_max - (step*j) - (step/2); - pt.x = cx_min + (step*i) + (step/2); - marker_points.push_back(pt); - } - } - - // Samples to be used in margins - // TODO: Now this works only if the "margin" is without decimals - // TODO: This should be made a lot cleaner - marker_margin_w.clear(); - marker_margin_b.clear(); - for(int j = -1; j<=margin-1; j++) { - PointDouble pt; - // Sides - for (int i=0; i x_max) || (pt.y > y_max)) - { - marker_margin_w.push_back(pt); - } - else - if ((pt.x < cx_min) || (pt.y < cy_min) || - (pt.x > cx_max) || (pt.y > cy_max)) - { - marker_margin_b.push_back(pt); - } - } - } - /* - //double step = edge_length / (res + margin + margin); - for(int j = 0; j < res+margin+margin+2; ++j) { - for(int i = 0; i < res+margin+margin+2; ++i) { - PointDouble pt; - pt.y = y_min - step/2 + step*j; - pt.x = x_min - step/2 + step*i; - if ((pt.x < x_min) || (pt.y < y_min) || - (pt.x > x_max) || (pt.y > y_max)) - { - marker_margin_w.push_back(pt); - } - else - if ((pt.x < cx_min) || (pt.y < cy_min) || - (pt.x > cx_max) || (pt.y > cy_max)) - { - marker_margin_b.push_back(pt); - } - } - } - */ - /* - marker_margin_w.clear(); - marker_margin_b.clear(); - for (double y=y_min-(step/2); y x_max) || (y > y_max)) - { - marker_margin_w.push_back(pt); - } - else - if ((x < cx_min) || (y < cy_min) || - (x > cx_max) || (y > cy_max)) - { - marker_margin_b.push_back(pt); - } - } - } - */ - /* - marker_points.clear(); - marker_margin_w.clear(); - marker_margin_b.clear(); - for(int j = 0; j < res+margin+margin+2; ++j) { - for(int i = 0; i < res+margin+margin+2; ++i) { - 
PointDouble pt; - } - } - */ - - // marker content - if (marker_content) cvReleaseMat(&marker_content); - marker_content = cvCreateMat(res, res, CV_8U); - cvSet(marker_content, cvScalar(255)); +void Marker::SetMarkerSize(double _edge_length, int _res, double _margin) +{ + // TODO: Is this right place for default marker size? + edge_length = (_edge_length ? _edge_length : 1); + res = _res; //(_res?_res:10); + margin = (_margin ? _margin : 1); + double x_min = -0.5 * edge_length; + double y_min = -0.5 * edge_length; + double x_max = 0.5 * edge_length; + double y_max = 0.5 * edge_length; + double cx_min = (x_min * res) / (res + margin + margin); + double cy_min = (y_min * res) / (res + margin + margin); + double cx_max = (x_max * res) / (res + margin + margin); + double cy_max = (y_max * res) / (res + margin + margin); + double step = edge_length / (res + margin + margin); + + // marker_corners + marker_corners_img.resize(4); + + // Same order as the detected corners + marker_corners.clear(); + marker_corners.push_back(PointDouble(x_min, y_min)); + marker_corners.push_back(PointDouble(x_max, y_min)); + marker_corners.push_back(PointDouble(x_max, y_max)); + marker_corners.push_back(PointDouble(x_min, y_max)); + + // Rest can be done only if we have existing resolution + if (res <= 0) + { + return; + } + + // marker_points + marker_points.clear(); + for (int j = 0; j < res; ++j) + { + for (int i = 0; i < res; ++i) + { + PointDouble pt; + pt.y = cy_max - (step * j) - (step / 2); + pt.x = cx_min + (step * i) + (step / 2); + marker_points.push_back(pt); + } + } + + // Samples to be used in margins + // TODO: Now this works only if the "margin" is without decimals + // TODO: This should be made a lot cleaner + marker_margin_w.clear(); + marker_margin_b.clear(); + for (int j = -1; j <= margin - 1; j++) + { + PointDouble pt; + // Sides + for (int i = 0; i < res; i++) + { + pt.x = cx_min + step * i + step / 2; + pt.y = y_min + step * j + step / 2; + if (j < 0) + marker_margin_w.push_back(pt); + else + marker_margin_b.push_back(pt); + pt.y = y_max - step * j - step / 2; + if (j < 0) + marker_margin_w.push_back(pt); + else + marker_margin_b.push_back(pt); + pt.y = cy_min + step * i + step / 2; + pt.x = x_min + step * j + step / 2; + if (j < 0) + marker_margin_w.push_back(pt); + else + marker_margin_b.push_back(pt); + pt.x = x_max - step * j - step / 2; + if (j < 0) + marker_margin_w.push_back(pt); + else + marker_margin_b.push_back(pt); + } + // Corners + for (int i = -1; i <= margin - 1; i++) + { + pt.x = x_min + step * i + step / 2; + pt.y = y_min + step * j + step / 2; + if ((j < 0) || (i < 0)) + marker_margin_w.push_back(pt); + else + marker_margin_b.push_back(pt); + pt.x = x_min + step * i + step / 2; + pt.y = y_max - step * j - step / 2; + if ((j < 0) || (i < 0)) + marker_margin_w.push_back(pt); + else + marker_margin_b.push_back(pt); + pt.x = x_max - step * i - step / 2; + pt.y = y_max - step * j - step / 2; + if ((j < 0) || (i < 0)) + marker_margin_w.push_back(pt); + else + marker_margin_b.push_back(pt); + pt.x = x_max - step * i - step / 2; + pt.y = y_min + step * j + step / 2; + if ((j < 0) || (i < 0)) + marker_margin_w.push_back(pt); + else + marker_margin_b.push_back(pt); + } + } + /* + for(int j = -margin-1; j < res+margin+margin+2; ++j) { + for(int i = 0; i < res+margin+margin+2; ++i) { + PointDouble pt; + pt.y = y_min - step/2 + step*j; + pt.x = x_min - step/2 + step*i; + if ((pt.x < x_min) || (pt.y < y_min) || + (pt.x > x_max) || (pt.y > y_max)) + { + marker_margin_w.push_back(pt); + } + 
else + if ((pt.x < cx_min) || (pt.y < cy_min) || + (pt.x > cx_max) || (pt.y > cy_max)) + { + marker_margin_b.push_back(pt); + } + } + } + /* + //double step = edge_length / (res + margin + margin); + for(int j = 0; j < res+margin+margin+2; ++j) { + for(int i = 0; i < res+margin+margin+2; ++i) { + PointDouble pt; + pt.y = y_min - step/2 + step*j; + pt.x = x_min - step/2 + step*i; + if ((pt.x < x_min) || (pt.y < y_min) || + (pt.x > x_max) || (pt.y > y_max)) + { + marker_margin_w.push_back(pt); + } + else + if ((pt.x < cx_min) || (pt.y < cy_min) || + (pt.x > cx_max) || (pt.y > cy_max)) + { + marker_margin_b.push_back(pt); + } + } + } + */ + /* + marker_margin_w.clear(); + marker_margin_b.clear(); + for (double y=y_min-(step/2); y x_max) || (y > y_max)) + { + marker_margin_w.push_back(pt); + } + else + if ((x < cx_min) || (y < cy_min) || + (x > cx_max) || (y > cy_max)) + { + marker_margin_b.push_back(pt); + } + } + } + */ + /* + marker_points.clear(); + marker_margin_w.clear(); + marker_margin_b.clear(); + for(int j = 0; j < res+margin+margin+2; ++j) { + for(int i = 0; i < res+margin+margin+2; ++i) { + PointDouble pt; + } + } + */ + + // marker content + if (!marker_content.empty()) + marker_content.release(); + marker_content = cv::Mat(res, res, CV_8U, cv::Scalar(255)); } -Marker::~Marker() { - if (marker_content) cvReleaseMat(&marker_content); +Marker::~Marker() +{ + if (!marker_content.empty()) + marker_content.release(); } Marker::Marker(double _edge_length, int _res, double _margin) { - marker_content = NULL; - margin_error = 0; - decode_error = 0; - track_error = 0; - SetMarkerSize(_edge_length, _res, _margin); - ros_orientation = -1; - ros_corners_3D.resize(4); - valid=false; + margin_error = 0; + decode_error = 0; + track_error = 0; + SetMarkerSize(_edge_length, _res, _margin); + ros_orientation = -1; + ros_corners_3D.resize(4); + valid = false; } -Marker::Marker(const Marker& m) { - marker_content = NULL; - SetMarkerSize(m.edge_length, m.res, m.margin); - - pose = m.pose; - margin_error = m.margin_error; - decode_error = m.decode_error; - track_error = m.track_error; - cvCopy(m.marker_content, marker_content); - ros_orientation = m.ros_orientation; - - ros_marker_points_img.resize(m.ros_marker_points_img.size()); - copy(m.ros_marker_points_img.begin(), m.ros_marker_points_img.end(), ros_marker_points_img.begin()); - marker_corners.resize(m.marker_corners.size()); - copy(m.marker_corners.begin(), m.marker_corners.end(), marker_corners.begin()); - marker_points.resize(m.marker_points.size()); - copy(m.marker_points.begin(), m.marker_points.end(), marker_points.begin()); - marker_corners_img.resize(m.marker_corners_img.size()); - copy(m.marker_corners_img.begin(), m.marker_corners_img.end(), marker_corners_img.begin()); - ros_corners_3D.resize(m.ros_corners_3D.size()); - copy(m.ros_corners_3D.begin(), m.ros_corners_3D.end(), ros_corners_3D.begin()); - - valid = m.valid; +Marker::Marker(const Marker& m) +{ + SetMarkerSize(m.edge_length, m.res, m.margin); + + pose = m.pose; + margin_error = m.margin_error; + decode_error = m.decode_error; + track_error = m.track_error; + m.marker_content.copyTo(marker_content); + ros_orientation = m.ros_orientation; + + ros_marker_points_img.resize(m.ros_marker_points_img.size()); + copy(m.ros_marker_points_img.begin(), m.ros_marker_points_img.end(), + ros_marker_points_img.begin()); + marker_corners.resize(m.marker_corners.size()); + copy(m.marker_corners.begin(), m.marker_corners.end(), + marker_corners.begin()); + 
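
One detail of the ported copy constructor above is worth calling out: plain `cv::Mat` assignment copies only the header, leaving both markers sharing one pixel buffer, which is why the port uses `copyTo` for `marker_content`. A small standalone demonstration:

```
#include <opencv2/core.hpp>

void copy_semantics_demo()
{
  cv::Mat a = cv::Mat::zeros(3, 3, CV_8U);

  cv::Mat shallow = a;  // header copy: shares a's pixel buffer
  cv::Mat deep;
  a.copyTo(deep);       // deep copy: independent buffer (cf. a.clone())

  a.at<uchar>(0, 0) = 255;
  // shallow.at<uchar>(0, 0) == 255, but deep.at<uchar>(0, 0) == 0
}
```
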
marker_points.resize(m.marker_points.size()); + copy(m.marker_points.begin(), m.marker_points.end(), marker_points.begin()); + marker_corners_img.resize(m.marker_corners_img.size()); + copy(m.marker_corners_img.begin(), m.marker_corners_img.end(), + marker_corners_img.begin()); + ros_corners_3D.resize(m.ros_corners_3D.size()); + copy(m.ros_corners_3D.begin(), m.ros_corners_3D.end(), + ros_corners_3D.begin()); + + valid = m.valid; #ifdef VISUALIZE_MARKER_POINTS - marker_allpoints_img.resize(m.marker_allpoints_img.size()); - copy(m.marker_allpoints_img.begin(), m.marker_allpoints_img.end(), marker_allpoints_img.begin()); + marker_allpoints_img.resize(m.marker_allpoints_img.size()); + copy(m.marker_allpoints_img.begin(), m.marker_allpoints_img.end(), + marker_allpoints_img.begin()); #endif } -bool MarkerArtoolkit::DecodeContent(int *orientation) { - int a = (int)cvGetReal2D(marker_content, 0, 0); - int b = (int)cvGetReal2D(marker_content, res-1, 0); - int c = (int)cvGetReal2D(marker_content, res-1, res-1); - int d = (int)cvGetReal2D(marker_content, 0, res-1); - if (!a && !b && c) *orientation = 0; - else if (!b && !c && d) *orientation = 1; - else if (!c && !d && a) *orientation = 2; - else if (!d && !a && b) *orientation = 3; - else return false; - - Bitset bs; - bs.clear(); - for (int j = 0; j < res; j++) { - for (int i = 0; i < res ; i++) { - if (*orientation == 0) { - if ((j == 0) && (i == 0)) continue; - if ((j == res-1) && (i == 0)) continue; - if ((j == res-1) && (i == res-1)) continue; - if (cvGetReal2D(marker_content, j, i)) bs.push_back(false); - else bs.push_back(true); - } - else if (*orientation == 1) { - if (((res-i-1) == res-1) && (j == 0)) continue; - if (((res-i-1) == res-1) && (j == res-1)) continue; - if (((res-i-1) == 0) && (j == res-1)) continue; - if (cvGetReal2D(marker_content, res-i-1, j)) bs.push_back(false); - else bs.push_back(true); - } - else if (*orientation == 2) { - if (((res-j-1) == res-1) && ((res-i-1) == res-1)) continue; - if (((res-j-1) == 0) && ((res-i-1) == res-1)) continue; - if (((res-j-1) == 0) && ((res-i-1) == 0)) continue; - if (cvGetReal2D(marker_content, res-j-1, res-i-1)) bs.push_back(false); - else bs.push_back(true); - } - else if (*orientation == 3) { - if ((i == 0) && ((res-j-1) == res-1)) continue; - if ((i == 0) && ((res-j-1) == 0)) continue; - if ((i == res-1) && ((res-j-1) == 0)) continue; - if (cvGetReal2D(marker_content, i, res-j-1)) bs.push_back(false); - else bs.push_back(true); - } - } - } - id = bs.ulong(); - return true; +bool MarkerArtoolkit::DecodeContent(int* orientation) +{ + int a = (int)marker_content.at(0, 0); + int b = (int)marker_content.at(res - 1, 0); + int c = (int)marker_content.at(res - 1, res - 1); + int d = (int)marker_content.at(0, res - 1); + if (!a && !b && c) + *orientation = 0; + else if (!b && !c && d) + *orientation = 1; + else if (!c && !d && a) + *orientation = 2; + else if (!d && !a && b) + *orientation = 3; + else + return false; + + Bitset bs; + bs.clear(); + for (int j = 0; j < res; j++) + { + for (int i = 0; i < res; i++) + { + if (*orientation == 0) + { + if ((j == 0) && (i == 0)) + continue; + if ((j == res - 1) && (i == 0)) + continue; + if ((j == res - 1) && (i == res - 1)) + continue; + if (marker_content.at(j, i)) + bs.push_back(false); + else + bs.push_back(true); + } + else if (*orientation == 1) + { + if (((res - i - 1) == res - 1) && (j == 0)) + continue; + if (((res - i - 1) == res - 1) && (j == res - 1)) + continue; + if (((res - i - 1) == 0) && (j == res - 1)) + continue; + if 
(marker_content.at<uchar>(res - i - 1, j))
+          bs.push_back(false);
+        else
+          bs.push_back(true);
+      }
+      else if (*orientation == 2)
+      {
+        if (((res - j - 1) == res - 1) && ((res - i - 1) == res - 1))
+          continue;
+        if (((res - j - 1) == 0) && ((res - i - 1) == res - 1))
+          continue;
+        if (((res - j - 1) == 0) && ((res - i - 1) == 0))
+          continue;
+        if (marker_content.at<uchar>(res - j - 1, res - i - 1))
+          bs.push_back(false);
+        else
+          bs.push_back(true);
+      }
+      else if (*orientation == 3)
+      {
+        if ((i == 0) && ((res - j - 1) == res - 1))
+          continue;
+        if ((i == 0) && ((res - j - 1) == 0))
+          continue;
+        if ((i == res - 1) && ((res - j - 1) == 0))
+          continue;
+        if (marker_content.at<uchar>(i, res - j - 1))
+          bs.push_back(false);
+        else
+          bs.push_back(true);
+      }
+    }
+  }
+  id = bs.ulong();
+  return true;
 }

-void MarkerArtoolkit::SetContent(unsigned long _id) {
-	// Fill in the content values
-	margin_error = 0;
-	decode_error = 0;
-	id = _id;
-	Bitset bs;
-	bs.push_back_meaningful(_id);
-	for (int j = res-1; j >= 0; j--) {
-		for (int i = res-1; i >= 0 ; i--) {
-			if ((j == 0) && (i == 0))
-				cvSetReal2D(marker_content, j, i, 0);
-			else if ((j == res-1) && (i == 0))
-				cvSetReal2D(marker_content, j, i, 0);
-			else if ((j == res-1) && (i == res-1))
-				cvSetReal2D(marker_content, j, i, 255);
-			else {
-				if (bs.Length() && bs.pop_back())
-					cvSetReal2D(marker_content, j, i, 0);
-				else
-					cvSetReal2D(marker_content, j, i, 255);
-			}
-		}
-	}
+void MarkerArtoolkit::SetContent(unsigned long _id)
+{
+  // Fill in the content values
+  margin_error = 0;
+  decode_error = 0;
+  id = _id;
+  Bitset bs;
+  bs.push_back_meaningful(_id);
+  for (int j = res - 1; j >= 0; j--)
+  {
+    for (int i = res - 1; i >= 0; i--)
+    {
+      if ((j == 0) && (i == 0))
+        marker_content.at<uchar>(j, i) = 0;
+      else if ((j == res - 1) && (i == 0))
+        marker_content.at<uchar>(j, i) = 0;
+      else if ((j == res - 1) && (i == res - 1))
+        marker_content.at<uchar>(j, i) = 255;
+      else
+      {
+        if (bs.Length() && bs.pop_back())
+          marker_content.at<uchar>(j, i) = 0;
+        else
+          marker_content.at<uchar>(j, i) = 255;
+      }
+    }
+  }
 }

-void MarkerData::DecodeOrientation(int *error, int *total, int *orientation) {
-	int i,j;
-	vector<double> errors(4);
-	int color = 255;
-
-	// Resolution identification
-	j = res/2;
-	for (i=0; i<res; i++) {
-		(*total)++;
-		if ((int)cvGetReal2D(marker_content, j, i) != color) errors[0]++;
-		if ((int)cvGetReal2D(marker_content, i, j) != color) errors[1]++;
-		color = (color?0:255);
-	}
-	errors[2] = errors[0];
-	errors[3] = errors[1];
-
-	// Orientation identification
-	i = res/2;
-	for (j = (res/2)-2; j <= (res/2)+2; j++) {
-		if (j < (res/2)) {
-			(*total)++;
-			if ((int)cvGetReal2D(marker_content, j, i) != 0) errors[0]++;
-			if ((int)cvGetReal2D(marker_content, i, j) != 0) errors[1]++;
-			if ((int)cvGetReal2D(marker_content, j, i) != 255) errors[2]++;
-			if ((int)cvGetReal2D(marker_content, i, j) != 255) errors[3]++;
-		}
-		else if (j > (res/2)) {
-			(*total)++;
-			if ((int)cvGetReal2D(marker_content, j, i) != 255) errors[0]++;
-			if ((int)cvGetReal2D(marker_content, i, j) != 255) errors[1]++;
-			if ((int)cvGetReal2D(marker_content, j, i) != 0) errors[2]++;
-			if ((int)cvGetReal2D(marker_content, i, j) != 0) errors[3]++;
-		}
-	}
-	*orientation = min_element(errors.begin(), errors.end()) - errors.begin();
-	*error = int(errors[*orientation]);
-	//*orientation = 0; // ttehop
+void MarkerData::DecodeOrientation(int* error, int* total, int* orientation)
+{
+  int i, j;
+  vector<double> errors(4);
+  int color = 255;
+
+  // Resolution identification
+  j = res / 2;
+  for (i = 0; i < res; i++)
+  {
+    (*total)++;
+    if ((int)marker_content.at<uchar>(j, i) != color)
+      errors[0]++;
+    if ((int)marker_content.at<uchar>(i, j) != color)
+      errors[1]++;
+    color = (color ?
0 : 255); + } + errors[2] = errors[0]; + errors[3] = errors[1]; + + // Orientation identification + i = res / 2; + for (j = (res / 2) - 2; j <= (res / 2) + 2; j++) + { + if (j < (res / 2)) + { + (*total)++; + if ((int)marker_content.at(j, i) != 0) + errors[0]++; + if ((int)marker_content.at(i, j) != 0) + errors[1]++; + if ((int)marker_content.at(j, i) != 255) + errors[2]++; + if ((int)marker_content.at(i, j) != 255) + errors[3]++; + } + else if (j > (res / 2)) + { + (*total)++; + if ((int)marker_content.at(j, i) != 255) + errors[0]++; + if ((int)marker_content.at(i, j) != 255) + errors[1]++; + if ((int)marker_content.at(j, i) != 0) + errors[2]++; + if ((int)marker_content.at(i, j) != 0) + errors[3]++; + } + } + *orientation = min_element(errors.begin(), errors.end()) - errors.begin(); + *error = int(errors[*orientation]); + //*orientation = 0; // ttehop } -bool MarkerData::DetectResolution(vector &_marker_corners_img, IplImage *gray, Camera *cam) { - vector marker_corners_img_undist; - marker_corners_img_undist.resize(_marker_corners_img.size()); - copy(_marker_corners_img.begin(), _marker_corners_img.end(), marker_corners_img_undist.begin()); - - // line_points - std::vector line_points; - PointDouble pt; - line_points.clear(); - pt.x=0; pt.y=0; line_points.push_back(pt); - pt.x=-0.5*edge_length; pt.y=0; line_points.push_back(pt); - pt.x=+0.5*edge_length; pt.y=0; line_points.push_back(pt); - pt.x=0; pt.y=-0.5*edge_length; line_points.push_back(pt); - pt.x=0; pt.y=+0.5*edge_length; line_points.push_back(pt); - - // Figure out the marker point position in the image - // TODO: Note that line iterator cannot iterate outside image - // therefore we need to distort the endpoints and iterate straight lines. - // Right way would be to iterate undistorted lines and distort line points. 
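
`DecodeOrientation` above relies on the layout that `SetContent` writes: the middle row and middle column alternate white/black starting from white, and misreads of that pattern, evaluated for all four candidate orientations, accumulate into `errors`. A minimal sketch of the center-line check for one orientation (the function name is illustrative):

```
#include <opencv2/core.hpp>

// Counts cells of the middle row that break the expected 255/0 alternation.
// 'content' is the res x res CV_8U marker matrix, res odd.
int center_line_errors(const cv::Mat& content)
{
  int res = content.rows;
  int errors = 0;
  int color = 255;  // the line starts white
  for (int i = 0; i < res; i++)
  {
    if (content.at<uchar>(res / 2, i) != color)
      errors++;
    color = (color ? 0 : 255);
  }
  return errors;
}
```
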
- Homography H; - vector line_points_img(line_points.size()); - line_points_img.resize(line_points.size()); - cam->Undistort(marker_corners_img_undist); - H.Find(marker_corners, marker_corners_img_undist); - H.ProjectPoints(line_points, line_points_img); - cam->Distort(line_points_img); - - // Now we have undistorted line end points - // Find lines and then distort the whole line - int white_count[4] = {0}; // white counts for lines 1->0, 2->0, 3->0, 4->0 - CvPoint pt1, pt2; - pt2.x = int(line_points_img[0].x); - pt2.y = int(line_points_img[0].y); - if ((pt2.x < 0) || (pt2.y < 0) || - (pt2.x >= gray->width) || (pt2.y >= gray->height)) - { - return false; - } - bool white=true; - for (int i=0; i<4; i++) { - CvLineIterator iterator; - pt1.x = int(line_points_img[i+1].x); - pt1.y = int(line_points_img[i+1].y); - if ((pt1.x < 0) || (pt1.y < 0) || - (pt1.x >= gray->width) || (pt1.y >= gray->height)) - { - return false; - } - int count = cvInitLineIterator(gray, pt1, pt2, &iterator, 8, 0); - std::vector vals; - for(int ii = 0; ii < count; ii++ ){ - CV_NEXT_LINE_POINT(iterator); - vals.push_back(*(iterator.ptr)); - } - uchar vmin = *(std::min_element(vals.begin(), vals.end())); - uchar vmax = *(std::max_element(vals.begin(), vals.end())); - uchar thresh = (vmin+vmax)/2; - white=true; - int bc=0, wc=0, N=2; - for (size_t ii=0; ii= N)) { - white=false; - } - else if (!white && (wc >= N)) { - white=true; - white_count[i]++; - } - } - } - - if ((white_count[0]+white_count[1]) == (white_count[2]+white_count[3])) return false; - else if ((white_count[0]+white_count[1]) > (white_count[2]+white_count[3])) { - if (white_count[0] != white_count[1]) return false; - if (white_count[0] < 2) return false; - int nof_whites = white_count[0]*2-(white?1:0); // 'white' contains middle color - int new_res = 2*nof_whites-1; - SetMarkerSize(edge_length, new_res, margin); - } - else { - if (white_count[2] != white_count[3]) return false; - if (white_count[2] < 2) return false; - if (((white_count[2]%2) == 0) != white) return false; - int nof_whites = white_count[2]*2-(white?1:0); - int new_res = 2*nof_whites-1; - SetMarkerSize(edge_length, new_res, margin); - } - return true; +bool MarkerData::DetectResolution(vector& _marker_corners_img, + cv::Mat& gray, Camera* cam) +{ + vector marker_corners_img_undist; + marker_corners_img_undist.resize(_marker_corners_img.size()); + copy(_marker_corners_img.begin(), _marker_corners_img.end(), + marker_corners_img_undist.begin()); + + // line_points + std::vector line_points; + PointDouble pt; + line_points.clear(); + pt.x = 0; + pt.y = 0; + line_points.push_back(pt); + pt.x = -0.5 * edge_length; + pt.y = 0; + line_points.push_back(pt); + pt.x = +0.5 * edge_length; + pt.y = 0; + line_points.push_back(pt); + pt.x = 0; + pt.y = -0.5 * edge_length; + line_points.push_back(pt); + pt.x = 0; + pt.y = +0.5 * edge_length; + line_points.push_back(pt); + + // Figure out the marker point position in the image + // TODO: Note that line iterator cannot iterate outside image + // therefore we need to distort the endpoints and iterate straight + // lines. Right way would be to iterate undistorted lines and distort + // line points. 
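
As in `UpdateContentBasic`, the sample positions in `DetectResolution` come from a homography: undistort the detected corners, fit a marker-plane-to-image mapping, project the five line endpoints through it, then re-distort. alvar's `Homography` helper (`Find`/`ProjectPoints`) wraps the same operation this OpenCV-native sketch performs (the function name is illustrative):

```
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <vector>

std::vector<cv::Point2f> project_marker_points(
    const std::vector<cv::Point2f>& marker_corners,      // marker plane
    const std::vector<cv::Point2f>& marker_corners_img,  // image (undistorted)
    const std::vector<cv::Point2f>& marker_points)       // marker plane
{
  // Homography fitted to the four detected corners.
  cv::Mat H = cv::findHomography(marker_corners, marker_corners_img);
  std::vector<cv::Point2f> marker_points_img;
  cv::perspectiveTransform(marker_points, marker_points_img, H);
  return marker_points_img;
}
```
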
+ Homography H; + vector line_points_img(line_points.size()); + line_points_img.resize(line_points.size()); + cam->Undistort(marker_corners_img_undist); + H.Find(marker_corners, marker_corners_img_undist); + H.ProjectPoints(line_points, line_points_img); + cam->Distort(line_points_img); + + // Now we have undistorted line end points + // Find lines and then distort the whole line + int white_count[4] = { 0 }; // white counts for lines 1->0, 2->0, 3->0, 4->0 + cv::Point pt1, pt2; + pt2.x = int(line_points_img[0].x); + pt2.y = int(line_points_img[0].y); + if ((pt2.x < 0) || (pt2.y < 0) || (pt2.x >= gray.cols) || + (pt2.y >= gray.rows)) + { + return false; + } + bool white = true; + for (int i = 0; i < 4; i++) + { + pt1.x = int(line_points_img[i + 1].x); + pt1.y = int(line_points_img[i + 1].y); + if ((pt1.x < 0) || (pt1.y < 0) || (pt1.x >= gray.cols) || + (pt1.y >= gray.rows)) + { + return false; + } + cv::LineIterator iterator(gray, pt1, pt2, 8, false); + std::vector vals; + for (int ii = 0; ii < iterator.count; ii++, ++iterator) + { + vals.push_back(*(iterator.ptr)); + } + uchar vmin = *(std::min_element(vals.begin(), vals.end())); + uchar vmax = *(std::max_element(vals.begin(), vals.end())); + uchar thresh = (vmin + vmax) / 2; + white = true; + int bc = 0, wc = 0, N = 2; + for (size_t ii = 0; ii < vals.size(); ii++) + { + // change the color status if we had + // N subsequent pixels of the other color + if (vals[ii] < thresh) + { + bc++; + wc = 0; + } + else + { + wc++; + bc = 0; + } + + if (white && (bc >= N)) + { + white = false; + } + else if (!white && (wc >= N)) + { + white = true; + white_count[i]++; + } + } + } + + if ((white_count[0] + white_count[1]) == (white_count[2] + white_count[3])) + return false; + else if ((white_count[0] + white_count[1]) > + (white_count[2] + white_count[3])) + { + if (white_count[0] != white_count[1]) + return false; + if (white_count[0] < 2) + return false; + int nof_whites = + white_count[0] * 2 - (white ? 1 : 0); // 'white' contains middle color + int new_res = 2 * nof_whites - 1; + SetMarkerSize(edge_length, new_res, margin); + } + else + { + if (white_count[2] != white_count[3]) + return false; + if (white_count[2] < 2) + return false; + if (((white_count[2] % 2) == 0) != white) + return false; + int nof_whites = white_count[2] * 2 - (white ? 1 : 0); + int new_res = 2 * nof_whites - 1; + SetMarkerSize(edge_length, new_res, margin); + } + return true; } -bool MarkerData::UpdateContent(vector &_marker_corners_img, IplImage *gray, Camera *cam, int frame_no /*= 0*/) { - if (res == 0) { - if (!DetectResolution(_marker_corners_img, gray, cam)) return false; - } - return UpdateContentBasic(_marker_corners_img, gray, cam, frame_no); +bool MarkerData::UpdateContent(vector& _marker_corners_img, + cv::Mat& gray, Camera* cam, int frame_no /*= 0*/) +{ + if (res == 0) + { + if (!DetectResolution(_marker_corners_img, gray, cam)) + return false; + } + return UpdateContentBasic(_marker_corners_img, gray, cam, frame_no); } -int MarkerData::DecodeCode(int orientation, BitsetExt *bs, int *erroneous, int *total, - unsigned char* content_type) +int MarkerData::DecodeCode(int orientation, BitsetExt* bs, int* erroneous, + int* total, unsigned char* content_type) { - // TODO: The orientation isn't fully understood? - //for (int j = res-1; j >= 0; j--) { - for (int j = 0; j < res; j++) { - for (int i = 0; i < res ; i++) { - // TODO: Does this work ok for larger markers? 
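
The resolution probe above replaces `cvInitLineIterator`/`CV_NEXT_LINE_POINT` with `cv::LineIterator`. One subtlety: the old loop advanced the iterator before reading, so the port's read-then-advance loop samples a window shifted by one pixel; for min/max thresholding along the line this should be immaterial. A standalone sketch of the new-style sampling, assuming a `CV_8UC1` image:

```
#include <opencv2/imgproc.hpp>
#include <vector>

// Collects the gray values along the segment pt1 -> pt2.
std::vector<uchar> sample_line(const cv::Mat& gray, cv::Point pt1,
                               cv::Point pt2)
{
  cv::LineIterator it(gray, pt1, pt2, 8 /* connectivity */, false);
  std::vector<uchar> vals;
  vals.reserve(it.count);
  for (int i = 0; i < it.count; i++, ++it)
    vals.push_back(**it);  // operator*() yields a uchar* to the current pixel
  return vals;
}
```
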
- if ((orientation == 0) || (orientation == 2)) { - if (j == res/2) continue; - if ((i == res/2) && (j >= (res/2)-2) && (j <= (res/2)+2)) continue; - } else { - if (i == res/2) continue; - if ((j == res/2) && (i >= (res/2)-2) && (i <= (res/2)+2)) continue; - } - int color = 0; - if (orientation == 0) color = (int)cvGetReal2D(marker_content, j, i); - else if (orientation == 1) color = (int)cvGetReal2D(marker_content, res-i-1, j); - else if (orientation == 2) color = (int)cvGetReal2D(marker_content, res-j-1, res-i-1); - else if (orientation == 3) color = (int)cvGetReal2D(marker_content, i, res-j-1); - if (color) bs->push_back(false); - else bs->push_back(true); - (*total)++; - } - } - - unsigned char flags = 0; - int errors = 0; - - // if we have larger than 16-bit code, then we have a header; 16-bit code has a - // hamming(8,4) coded number - if(bs->Length() > 16){ - // read header (8-bit hamming(8,4) -> 4-bit flags) - BitsetExt header; - - for(int i = 0; i < HEADER_SIZE; i++) - header.push_back(bs->pop_front()); - - errors = header.hamming_dec(8); - if(errors == -1){ - //OutputDebugString("header decoding failed!!!!!\n"); - return errors; - } - - flags = header.uchar(); - } - else - flags &= MARKER_CONTENT_TYPE_NUMBER; - - // check which hamming we are using - //bs->Output(cout); cout<hamming_dec(16); - else errors = bs->hamming_dec(8); - *content_type = flags & 0x7; - - if (errors > 0) (*erroneous) += errors; - return errors; + // TODO: The orientation isn't fully understood? + // for (int j = res-1; j >= 0; j--) { + for (int j = 0; j < res; j++) + { + for (int i = 0; i < res; i++) + { + // TODO: Does this work ok for larger markers? + if ((orientation == 0) || (orientation == 2)) + { + if (j == res / 2) + continue; + if ((i == res / 2) && (j >= (res / 2) - 2) && (j <= (res / 2) + 2)) + continue; + } + else + { + if (i == res / 2) + continue; + if ((j == res / 2) && (i >= (res / 2) - 2) && (i <= (res / 2) + 2)) + continue; + } + int color = 0; + if (orientation == 0) + color = (int)marker_content.at(j, i); + else if (orientation == 1) + color = (int)marker_content.at(res - i - 1, j); + else if (orientation == 2) + color = (int)marker_content.at(res - j - 1, res - i - 1); + else if (orientation == 3) + color = (int)marker_content.at(i, res - j - 1); + if (color) + bs->push_back(false); + else + bs->push_back(true); + (*total)++; + } + } + + unsigned char flags = 0; + int errors = 0; + + // if we have larger than 16-bit code, then we have a header; 16-bit code has + // a hamming(8,4) coded number + if (bs->Length() > 16) + { + // read header (8-bit hamming(8,4) -> 4-bit flags) + BitsetExt header; + + for (int i = 0; i < HEADER_SIZE; i++) + header.push_back(bs->pop_front()); + + errors = header.hamming_dec(8); + if (errors == -1) + { + // OutputDebugString("header decoding failed!!!!!\n"); + return errors; + } + + flags = header.uchar(); + } + else + flags &= MARKER_CONTENT_TYPE_NUMBER; + + // check which hamming we are using + // bs->Output(cout); cout<hamming_dec(16); + else + errors = bs->hamming_dec(8); + *content_type = flags & 0x7; + + if (errors > 0) + (*erroneous) += errors; + return errors; } -void MarkerData::Read6bitStr(BitsetExt *bs, char *s, size_t s_max_len) { - deque bits = bs->GetBits(); - deque::const_iterator iter; - size_t len = 0; - int bitpos = 5; - unsigned long c=0; - for (iter = bits.begin(); iter != bits.end(); iter++) { - if (*iter) c |= (0x01 << bitpos); - bitpos--; - if (bitpos < 0) { - if (c == 000) s[len] = ':'; - else if ((c >= 001) && (c <= 032)) s[len] = 'a' 
+ (char)c - 1; - else if ((c >= 033) && (c <= 044)) s[len] = '0' + (char)c - 1; - else if (c == 045) s[len] = '+'; - else if (c == 046) s[len] = '-'; - else if (c == 047) s[len] = '*'; - else if (c == 050) s[len] = '/'; - else if (c == 051) s[len] = '('; - else if (c == 052) s[len] = ')'; - else if (c == 053) s[len] = '$'; - else if (c == 054) s[len] = '='; - else if (c == 055) s[len] = ' '; - else if (c == 056) s[len] = ','; - else if (c == 057) s[len] = '.'; - else if (c == 060) s[len] = '#'; - else if (c == 061) s[len] = '['; - else if (c == 062) s[len] = ']'; - else if (c == 063) s[len] = '%'; - else if (c == 064) s[len] = '\"'; - else if (c == 065) s[len] = '_'; - else if (c == 066) s[len] = '!'; - else if (c == 067) s[len] = '&'; - else if (c == 070) s[len] = '\''; - else if (c == 071) s[len] = '?'; - else if (c == 072) s[len] = '<'; - else if (c == 073) s[len] = '>'; - else if (c == 074) s[len] = '@'; - else if (c == 075) s[len] = '\\'; - else if (c == 076) s[len] = '^'; - else if (c == 077) s[len] = ';'; - else s[len] = '?'; - len++; - if (len >= (s_max_len-1)) break; - bitpos=5; c=0; - } - } - s[len] = 0; +void MarkerData::Read6bitStr(BitsetExt* bs, char* s, size_t s_max_len) +{ + deque bits = bs->GetBits(); + deque::const_iterator iter; + size_t len = 0; + int bitpos = 5; + unsigned long c = 0; + for (iter = bits.begin(); iter != bits.end(); iter++) + { + if (*iter) + c |= (0x01 << bitpos); + bitpos--; + if (bitpos < 0) + { + if (c == 000) + s[len] = ':'; + else if ((c >= 001) && (c <= 032)) + s[len] = 'a' + (char)c - 1; + else if ((c >= 033) && (c <= 044)) + s[len] = '0' + (char)c - 1; + else if (c == 045) + s[len] = '+'; + else if (c == 046) + s[len] = '-'; + else if (c == 047) + s[len] = '*'; + else if (c == 050) + s[len] = '/'; + else if (c == 051) + s[len] = '('; + else if (c == 052) + s[len] = ')'; + else if (c == 053) + s[len] = '$'; + else if (c == 054) + s[len] = '='; + else if (c == 055) + s[len] = ' '; + else if (c == 056) + s[len] = ','; + else if (c == 057) + s[len] = '.'; + else if (c == 060) + s[len] = '#'; + else if (c == 061) + s[len] = '['; + else if (c == 062) + s[len] = ']'; + else if (c == 063) + s[len] = '%'; + else if (c == 064) + s[len] = '\"'; + else if (c == 065) + s[len] = '_'; + else if (c == 066) + s[len] = '!'; + else if (c == 067) + s[len] = '&'; + else if (c == 070) + s[len] = '\''; + else if (c == 071) + s[len] = '?'; + else if (c == 072) + s[len] = '<'; + else if (c == 073) + s[len] = '>'; + else if (c == 074) + s[len] = '@'; + else if (c == 075) + s[len] = '\\'; + else if (c == 076) + s[len] = '^'; + else if (c == 077) + s[len] = ';'; + else + s[len] = '?'; + len++; + if (len >= (s_max_len - 1)) + break; + bitpos = 5; + c = 0; + } + } + s[len] = 0; } -bool MarkerData::DecodeContent(int *orientation) { - //bool decode(vector& colors, int *orientation, double *error) { - *orientation = 0; - - BitsetExt bs; - int erroneous=0; - int total=0; - - DecodeOrientation(&erroneous, &total, orientation); - int err = DecodeCode(*orientation, &bs, &erroneous, &total, &content_type); - if(err == -1) { - // couldn't fix - decode_error = DBL_MAX; - return false; - } - - if(content_type == MARKER_CONTENT_TYPE_NUMBER){ - data.id = bs.ulong(); - } - else { - Read6bitStr(&bs, data.str, MAX_MARKER_STRING_LEN); - } - - decode_error = (double)(erroneous)/total; - - return true; +bool MarkerData::DecodeContent(int* orientation) +{ + // bool decode(vector& colors, int *orientation, double *error) { + *orientation = 0; + + BitsetExt bs; + int erroneous = 0; + int total = 
0; + + DecodeOrientation(&erroneous, &total, orientation); + int err = DecodeCode(*orientation, &bs, &erroneous, &total, &content_type); + if (err == -1) + { + // couldn't fix + decode_error = DBL_MAX; + return false; + } + + if (content_type == MARKER_CONTENT_TYPE_NUMBER) + { + data.id = bs.ulong(); + } + else + { + Read6bitStr(&bs, data.str, MAX_MARKER_STRING_LEN); + } + + decode_error = (double)(erroneous) / total; + + return true; } -void MarkerData::Add6bitStr(BitsetExt *bs, char *s) { - while (*s) { - unsigned char c = (unsigned char)*s; - if (c == ':') bs->push_back((unsigned char)0,6); - else if ((c >= 'A') && (c <= 'Z')) bs->push_back((unsigned char)(001 + c - 'A'),6); - else if ((c >= 'a') && (c <= 'z')) bs->push_back((unsigned char)(001 + c - 'a'),6); - else if ((c >= '0') && (c <= '9')) bs->push_back((unsigned char)(033 + c - '0'),6); - else if (c == '+') bs->push_back((unsigned char)045,6); - else if (c == '-') bs->push_back((unsigned char)046,6); - else if (c == '*') bs->push_back((unsigned char)047,6); - else if (c == '/') bs->push_back((unsigned char)050,6); - else if (c == '(') bs->push_back((unsigned char)051,6); - else if (c == ')') bs->push_back((unsigned char)052,6); - else if (c == '$') bs->push_back((unsigned char)053,6); - else if (c == '=') bs->push_back((unsigned char)054,6); - else if (c == ' ') bs->push_back((unsigned char)055,6); - else if (c == ',') bs->push_back((unsigned char)056,6); - else if (c == '.') bs->push_back((unsigned char)057,6); - else if (c == '#') bs->push_back((unsigned char)060,6); - else if (c == '[') bs->push_back((unsigned char)061,6); - else if (c == ']') bs->push_back((unsigned char)062,6); - else if (c == '%') bs->push_back((unsigned char)063,6); - else if (c == '\"') bs->push_back((unsigned char)064,6); - else if (c == '_') bs->push_back((unsigned char)065,6); - else if (c == '!') bs->push_back((unsigned char)066,6); - else if (c == '&') bs->push_back((unsigned char)067,6); - else if (c == '\'') bs->push_back((unsigned char)070,6); - else if (c == '?') bs->push_back((unsigned char)071,6); - else if (c == '<') bs->push_back((unsigned char)072,6); - else if (c == '>') bs->push_back((unsigned char)073,6); - else if (c == '@') bs->push_back((unsigned char)074,6); - else if (c == '\\') bs->push_back((unsigned char)075,6); - else if (c == '^') bs->push_back((unsigned char)076,6); - else if (c == ';') bs->push_back((unsigned char)077,6); - else bs->push_back((unsigned char)071,6); - s++; - } +void MarkerData::Add6bitStr(BitsetExt* bs, char* s) +{ + while (*s) + { + unsigned char c = (unsigned char)*s; + if (c == ':') + bs->push_back((unsigned char)0, 6); + else if ((c >= 'A') && (c <= 'Z')) + bs->push_back((unsigned char)(001 + c - 'A'), 6); + else if ((c >= 'a') && (c <= 'z')) + bs->push_back((unsigned char)(001 + c - 'a'), 6); + else if ((c >= '0') && (c <= '9')) + bs->push_back((unsigned char)(033 + c - '0'), 6); + else if (c == '+') + bs->push_back((unsigned char)045, 6); + else if (c == '-') + bs->push_back((unsigned char)046, 6); + else if (c == '*') + bs->push_back((unsigned char)047, 6); + else if (c == '/') + bs->push_back((unsigned char)050, 6); + else if (c == '(') + bs->push_back((unsigned char)051, 6); + else if (c == ')') + bs->push_back((unsigned char)052, 6); + else if (c == '$') + bs->push_back((unsigned char)053, 6); + else if (c == '=') + bs->push_back((unsigned char)054, 6); + else if (c == ' ') + bs->push_back((unsigned char)055, 6); + else if (c == ',') + bs->push_back((unsigned char)056, 6); + else if (c == '.') + 
bs->push_back((unsigned char)057, 6); + else if (c == '#') + bs->push_back((unsigned char)060, 6); + else if (c == '[') + bs->push_back((unsigned char)061, 6); + else if (c == ']') + bs->push_back((unsigned char)062, 6); + else if (c == '%') + bs->push_back((unsigned char)063, 6); + else if (c == '\"') + bs->push_back((unsigned char)064, 6); + else if (c == '_') + bs->push_back((unsigned char)065, 6); + else if (c == '!') + bs->push_back((unsigned char)066, 6); + else if (c == '&') + bs->push_back((unsigned char)067, 6); + else if (c == '\'') + bs->push_back((unsigned char)070, 6); + else if (c == '?') + bs->push_back((unsigned char)071, 6); + else if (c == '<') + bs->push_back((unsigned char)072, 6); + else if (c == '>') + bs->push_back((unsigned char)073, 6); + else if (c == '@') + bs->push_back((unsigned char)074, 6); + else if (c == '\\') + bs->push_back((unsigned char)075, 6); + else if (c == '^') + bs->push_back((unsigned char)076, 6); + else if (c == ';') + bs->push_back((unsigned char)077, 6); + else + bs->push_back((unsigned char)071, 6); + s++; + } } -int MarkerData::UsableDataBits(int marker_res, int hamming) { - if (marker_res < 5) return 0; - if (!(marker_res % 2)) return 0; - int bits = marker_res * marker_res; - if (marker_res > 5) bits -= 8; // With larger resolutions we reserve 8 bits for hamming(8,4) encoded 4 flags - bits -= marker_res; // center line indicating the resolution - bits -= 4; // the four pixels indicating the orientation - int tail = bits % hamming; - if (tail < 3) bits -= tail; // hamming can't use tail pixels if there is only 2 or 1 of them - return bits; +int MarkerData::UsableDataBits(int marker_res, int hamming) +{ + if (marker_res < 5) + return 0; + if (!(marker_res % 2)) + return 0; + int bits = marker_res * marker_res; + if (marker_res > 5) + bits -= 8; // With larger resolutions we reserve 8 bits for hamming(8,4) + // encoded 4 flags + bits -= marker_res; // center line indicating the resolution + bits -= 4; // the four pixels indicating the orientation + int tail = bits % hamming; + if (tail < 3) + bits -= + tail; // hamming can't use tail pixels if there is only 2 or 1 of them + return bits; } -void MarkerData::SetContent(MarkerContentType _content_type, unsigned long _id, const char *_str, bool force_strong_hamming, bool verbose) { - // Fill in the content values - content_type = _content_type; - margin_error = 0; - decode_error = 0; - if (content_type == MARKER_CONTENT_TYPE_NUMBER) { - data.id = _id; - } else { - STRCPY(data.str, MAX_MARKER_STRING_LEN, _str); - } - // Encode - const int max_marker_res = 127; - BitsetExt bs_flags(verbose); - BitsetExt bs_data(verbose); - int enc_bits; // How many encoded bits fits in the marker - int data_bits; // How many data bits fit inside the encoded bits - int hamming; // Do we use 8-bit or 16-bit hamming? 
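
`UsableDataBits` above makes the bit budget explicit. Worked through for the smallest marker, assuming `BitsetExt::count_hamming_dec_len(8, n)` yields the usual hamming(8,4) payload of 4 data bits per 8 encoded bits:

```
#include <cassert>

int main()
{
  // 5x5 marker: 25 cells, minus the 5-cell center resolution line, minus the
  // 4 orientation cells (res == 5 carries no hamming(8,4) header flags).
  int usable = 5 * 5 - 5 - 4;
  assert(usable == 16);

  // Two hamming(8,4) blocks -> 8 payload bits, so plain numeric ids up to
  // 255 fit in the smallest marker.
  assert((usable / 8) * 4 == 8);
  return 0;
}
```
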
- if (content_type == MARKER_CONTENT_TYPE_NUMBER) { - bs_data.push_back_meaningful(data.id); - for (res=5; res= bs_data.Length()) break; - if ((res > 5) && !force_strong_hamming) { - hamming = 16; - enc_bits = UsableDataBits(res, hamming); - data_bits = BitsetExt::count_hamming_dec_len(hamming, enc_bits); - if (data_bits >= bs_data.Length()) break; - } - } - bs_data.fill_zeros_left(data_bits); - bs_data.hamming_enc(hamming); - if (verbose) { - cout<<"Using hamming("< 5) { - if (hamming == 16) bs_flags.push_back(true); - else bs_flags.push_back(false); - bs_flags.push_back((unsigned long)0,3); - bs_flags.hamming_enc(8); - if (verbose) { - cout<<"flags src: "; bs_flags.Output(cout); cout<= bs_data.Length()) break; - if (!force_strong_hamming) { - hamming = 16; - enc_bits = UsableDataBits(res, hamming); - data_bits = BitsetExt::count_hamming_dec_len(hamming, enc_bits); - if (data_bits >= bs_data.Length()) break; - } - } - while (bs_data.Length() < ((data_bits/6)*6)) { - bs_data.push_back((unsigned char)055,6); // Add space - } - while (bs_data.Length() < data_bits) { - bs_data.push_back(false); // Add 0 - } - bs_data.hamming_enc(hamming); - if (hamming == 16) bs_flags.push_back(true); - else bs_flags.push_back(false); - if (content_type == MARKER_CONTENT_TYPE_STRING) bs_flags.push_back((unsigned long)1,3); - else if (content_type == MARKER_CONTENT_TYPE_FILE) bs_flags.push_back((unsigned long)2,3); - else if (content_type == MARKER_CONTENT_TYPE_HTTP) bs_flags.push_back((unsigned long)3,3); - bs_flags.hamming_enc(8); - if (verbose) { - cout<<"Using hamming("< bs(bs_flags.GetBits()); - bs.insert(bs.end(), bs_data.GetBits().begin(), bs_data.GetBits().end()); - deque::const_iterator iter = bs.begin(); - SetMarkerSize(edge_length, res, margin); - cvSet(marker_content, cvScalar(255)); - for (int j=0; j= (res/2)-2)) { - cvSet2D(marker_content, j, i, cvScalar(0)); - } else if ((i == res/2) && (j > res/2) && (j <= (res/2)+2)) { - cvSet2D(marker_content, j, i, cvScalar(255)); - } else { - if (iter != bs.end()) { - if (*iter) cvSet2D(marker_content, j, i, cvScalar(0)); - iter++; - } - } - } - } +void MarkerData::SetContent(MarkerContentType _content_type, unsigned long _id, + const char* _str, bool force_strong_hamming, + bool verbose) +{ + // Fill in the content values + content_type = _content_type; + margin_error = 0; + decode_error = 0; + if (content_type == MARKER_CONTENT_TYPE_NUMBER) + { + data.id = _id; + } + else + { + STRCPY(data.str, MAX_MARKER_STRING_LEN, _str); + } + // Encode + const int max_marker_res = 127; + BitsetExt bs_flags(verbose); + BitsetExt bs_data(verbose); + int enc_bits; // How many encoded bits fits in the marker + int data_bits; // How many data bits fit inside the encoded bits + int hamming; // Do we use 8-bit or 16-bit hamming? 
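
The string branch of `SetContent` packs characters with the 6-bit alphabet defined by `Add6bitStr`/`Read6bitStr` above (note `055`, the code for space, doubling as the padding value). A self-consistent sketch of part of that mapping, written so the decode direction is the exact inverse of the encode direction; the function names are illustrative:

```
#include <cassert>

int encode6(char c)
{
  if (c == ':') return 0;
  if (c >= 'a' && c <= 'z') return 001 + c - 'a';  // upper case folds...
  if (c >= 'A' && c <= 'Z') return 001 + c - 'A';  // ...to lower case
  if (c >= '0' && c <= '9') return 033 + c - '0';
  if (c == ' ') return 055;
  return 071;  // '?' stands in for anything unmapped
}

char decode6(int v)
{
  if (v == 0) return ':';
  if (v >= 001 && v <= 032) return 'a' + v - 001;
  if (v >= 033 && v <= 044) return '0' + v - 033;
  if (v == 055) return ' ';
  return '?';
}

int main()
{
  assert(decode6(encode6('K')) == 'k');  // case folds to lower
  assert(decode6(encode6('7')) == '7');
  assert(decode6(encode6(' ')) == ' ');
  return 0;
}
```
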
+ if (content_type == MARKER_CONTENT_TYPE_NUMBER) + { + bs_data.push_back_meaningful(data.id); + for (res = 5; res < max_marker_res; res += 2) + { + hamming = 8; + enc_bits = UsableDataBits(res, hamming); + data_bits = BitsetExt::count_hamming_dec_len(hamming, enc_bits); + if (data_bits >= bs_data.Length()) + break; + if ((res > 5) && !force_strong_hamming) + { + hamming = 16; + enc_bits = UsableDataBits(res, hamming); + data_bits = BitsetExt::count_hamming_dec_len(hamming, enc_bits); + if (data_bits >= bs_data.Length()) + break; + } + } + bs_data.fill_zeros_left(data_bits); + bs_data.hamming_enc(hamming); + if (verbose) + { + cout << "Using hamming(" << hamming << ") for " << res << "x" << res + << " marker" << endl; + cout << bs_data.Length() << " bits are filled into " << data_bits; + cout << " bits, and encoded into " << enc_bits << " bits" << endl; + cout << "data src: "; + bs_data.Output(cout); + cout << endl; + cout << "data enc: "; + bs_data.Output(cout); + cout << endl; + } + if (res > 5) + { + if (hamming == 16) + bs_flags.push_back(true); + else + bs_flags.push_back(false); + bs_flags.push_back((unsigned long)0, 3); + bs_flags.hamming_enc(8); + if (verbose) + { + cout << "flags src: "; + bs_flags.Output(cout); + cout << endl; + cout << "flags enc: "; + bs_flags.Output(cout); + cout << endl; + } + } + } + else + { + Add6bitStr(&bs_data, data.str); + for (res = 7; res < max_marker_res; res += 2) + { + hamming = 8; + enc_bits = UsableDataBits(res, hamming); + data_bits = BitsetExt::count_hamming_dec_len(hamming, enc_bits); + if (data_bits >= bs_data.Length()) + break; + if (!force_strong_hamming) + { + hamming = 16; + enc_bits = UsableDataBits(res, hamming); + data_bits = BitsetExt::count_hamming_dec_len(hamming, enc_bits); + if (data_bits >= bs_data.Length()) + break; + } + } + while (bs_data.Length() < ((data_bits / 6) * 6)) + { + bs_data.push_back((unsigned char)055, 6); // Add space + } + while (bs_data.Length() < data_bits) + { + bs_data.push_back(false); // Add 0 + } + bs_data.hamming_enc(hamming); + if (hamming == 16) + bs_flags.push_back(true); + else + bs_flags.push_back(false); + if (content_type == MARKER_CONTENT_TYPE_STRING) + bs_flags.push_back((unsigned long)1, 3); + else if (content_type == MARKER_CONTENT_TYPE_FILE) + bs_flags.push_back((unsigned long)2, 3); + else if (content_type == MARKER_CONTENT_TYPE_HTTP) + bs_flags.push_back((unsigned long)3, 3); + bs_flags.hamming_enc(8); + if (verbose) + { + cout << "Using hamming(" << hamming << ") for " << res << "x" << res + << " marker" << endl; + cout << bs_data.Length() << " bits are filled into " << data_bits; + cout << " bits, and encoded into " << enc_bits << " bits" << endl; + cout << "data src: "; + bs_data.Output(cout); + cout << endl; + cout << "data enc: "; + bs_data.Output(cout); + cout << endl; + cout << "flags src: "; + bs_flags.Output(cout); + cout << endl; + cout << "flags enc: "; + bs_flags.Output(cout); + cout << endl; + } + } + + // Fill in the marker content + deque bs(bs_flags.GetBits()); + bs.insert(bs.end(), bs_data.GetBits().begin(), bs_data.GetBits().end()); + deque::const_iterator iter = bs.begin(); + SetMarkerSize(edge_length, res, margin); + marker_content = cv::Scalar(255); + for (int j = 0; j < res; j++) + { + for (int i = 0; i < res; i++) + { + if (j == res / 2) + { + if (i % 2) + marker_content.at(j, i) = 0; + } + else if ((i == res / 2) && (j < res / 2) && (j >= (res / 2) - 2)) + { + marker_content.at(j, i) = 0; + } + else if ((i == res / 2) && (j > res / 2) && (j <= (res / 2) + 2)) + { + 
marker_content.at(j, i) = 255; + } + else + { + if (iter != bs.end()) + { + if (*iter) + marker_content.at(j, i) = 0; + iter++; + } + } + } + } } -} // namespace alvar +} // namespace alvar \ No newline at end of file diff --git a/ar_track_alvar/src/MarkerDetector.cpp b/ar_track_alvar/src/MarkerDetector.cpp index 258ba32..efb0e03 100644 --- a/ar_track_alvar/src/MarkerDetector.cpp +++ b/ar_track_alvar/src/MarkerDetector.cpp @@ -29,181 +29,221 @@ template class ALVAR_EXPORT alvar::MarkerDetector; using namespace std; -namespace alvar { - MarkerDetectorImpl::MarkerDetectorImpl() { - SetMarkerSize(); - SetOptions(); - labeling = NULL; - } - - MarkerDetectorImpl::~MarkerDetectorImpl() { - if (labeling) delete labeling; - } - - void MarkerDetectorImpl::TrackMarkersReset() { - _track_markers_clear(); - } - - void MarkerDetectorImpl::TrackMarkerAdd(int id, PointDouble corners[4]) { - Marker *mn = new_M(edge_length, res, margin); - if (map_edge_length.find(id) != map_edge_length.end()) { - mn->SetMarkerSize(map_edge_length[id], res, margin); - } - - mn->SetId(id); - mn->marker_corners_img.clear(); - mn->marker_corners_img.push_back(corners[0]); - mn->marker_corners_img.push_back(corners[1]); - mn->marker_corners_img.push_back(corners[2]); - mn->marker_corners_img.push_back(corners[3]); - _track_markers_push_back(mn); - delete mn; - } +namespace alvar +{ +MarkerDetectorImpl::MarkerDetectorImpl() +{ + SetMarkerSize(); + SetOptions(); + labeling = NULL; +} + +MarkerDetectorImpl::~MarkerDetectorImpl() +{ + if (labeling) + delete labeling; +} - void MarkerDetectorImpl::SetMarkerSize(double _edge_length, int _res, double _margin) { - edge_length = _edge_length; - res = _res; - margin = _margin; - map_edge_length.clear(); // TODO: Should we clear these here? +void MarkerDetectorImpl::TrackMarkersReset() +{ + _track_markers_clear(); +} + +void MarkerDetectorImpl::TrackMarkerAdd(int id, PointDouble corners[4]) +{ + Marker* mn = new_M(edge_length, res, margin); + if (map_edge_length.find(id) != map_edge_length.end()) + { + mn->SetMarkerSize(map_edge_length[id], res, margin); } - void MarkerDetectorImpl::SetMarkerSizeForId(unsigned long id, double _edge_length) { - map_edge_length[id] = _edge_length; - } - - void MarkerDetectorImpl::SetOptions(bool _detect_pose_grayscale) { - detect_pose_grayscale = _detect_pose_grayscale; - } - - int MarkerDetectorImpl::Detect(IplImage *image, - Camera *cam, - bool track, - bool visualize, - double max_new_marker_error, - double max_track_error, - LabelingMethod labeling_method, - bool update_pose) - { - assert(image->origin == 0); // Currently only top-left origin supported - double error=-1; - - // Swap marker tables - _swap_marker_tables(); - _markers_clear(); - - switch(labeling_method) - { - case CVSEQ : - - if(!labeling) - labeling = new LabelingCvSeq(); - ((LabelingCvSeq*)labeling)->SetOptions(detect_pose_grayscale); - break; - } - - labeling->SetCamera(cam); - labeling->LabelSquares(image, visualize); - vector >& blob_corners = labeling->blob_corners; - IplImage* gray = labeling->gray; - - int orientation; - - // When tracking we find the best matching blob and test if it is near enough? 
- if (track) { - for (size_t ii=0; ii<_track_markers_size(); ii++) { - Marker *mn = _track_markers_at(ii); - if (mn->GetError(Marker::DECODE_ERROR|Marker::MARGIN_ERROR) > 0) continue; // We track only perfectly decoded markers - int track_i=-1; - int track_orientation=0; - double track_error=1e200; - for(unsigned i = 0; i < blob_corners.size()/*blobs_ret.size()*/; ++i) { - if (blob_corners[i].empty()) continue; - mn->CompareCorners(blob_corners[i], &orientation, &error); - if (error < track_error) { - track_i = i; - track_orientation = orientation; - track_error = error; - } - } - if (track_error <= max_track_error) { - mn->SetError(Marker::DECODE_ERROR, 0); - mn->SetError(Marker::MARGIN_ERROR, 0); - mn->SetError(Marker::TRACK_ERROR, track_error); - mn->UpdateContent(blob_corners[track_i], gray, cam); //Maybe should only do this when kinect is being used? Don't think it hurts anything... - mn->UpdatePose(blob_corners[track_i], cam, track_orientation, update_pose); - _markers_push_back(mn); - blob_corners[track_i].clear(); // We don't want to handle this again... - if (visualize) mn->Visualize(image, cam, CV_RGB(255,255,0)); - } - } - } - - // Now we go through the rest of the blobs -- in case there are new markers... - for(size_t i = 0; i < blob_corners.size(); ++i) - { - if (blob_corners[i].empty()) continue; - - Marker *mn = new_M(edge_length, res, margin); - bool ub = mn->UpdateContent(blob_corners[i], gray, cam); - bool db = mn->DecodeContent(&orientation); - if (ub && db && - (mn->GetError(Marker::MARGIN_ERROR | Marker::DECODE_ERROR) <= max_new_marker_error)) - { - if (map_edge_length.find(mn->GetId()) != map_edge_length.end()) { - mn->SetMarkerSize(map_edge_length[mn->GetId()], res, margin); - } - mn->UpdatePose(blob_corners[i], cam, orientation, update_pose); - mn->ros_orientation = orientation; - _markers_push_back(mn); - - if (visualize) mn->Visualize(image, cam, CV_RGB(255,0,0)); - } - - delete mn; - } - - return (int) _markers_size(); - } - - int MarkerDetectorImpl::DetectAdditional(IplImage *image, Camera *cam, bool visualize, double max_track_error) - { - assert(image->origin == 0); // Currently only top-left origin supported - if(!labeling) return -1; - double error=-1; - int orientation; - int count=0; - vector >& blob_corners = labeling->blob_corners; - - for (size_t ii=0; ii<_track_markers_size(); ii++) { - Marker *mn = _track_markers_at(ii); - if (mn->GetError(Marker::DECODE_ERROR|Marker::MARGIN_ERROR) > 0) continue; // We track only perfectly decoded markers - int track_i=-1; - int track_orientation=0; - double track_error=1e200; - for(unsigned i = 0; i < blob_corners.size(); ++i) { - if (blob_corners[i].empty()) continue; - mn->CompareCorners(blob_corners[i], &orientation, &error); - if (error < track_error) { - track_i = i; - track_orientation = orientation; - track_error = error; - } - } - if (track_error <= max_track_error) { - mn->SetError(Marker::DECODE_ERROR, 0); - mn->SetError(Marker::MARGIN_ERROR, 0); - mn->SetError(Marker::TRACK_ERROR, track_error); - mn->UpdatePose(blob_corners[track_i], cam, track_orientation); - _markers_push_back(mn); - count++; - blob_corners[track_i].clear(); // We don't want to handle this again... 
- - if (visualize) { - mn->Visualize(image, cam, CV_RGB(0,255,255)); - } - } - } - return count; - } + mn->SetId(id); + mn->marker_corners_img.clear(); + mn->marker_corners_img.push_back(corners[0]); + mn->marker_corners_img.push_back(corners[1]); + mn->marker_corners_img.push_back(corners[2]); + mn->marker_corners_img.push_back(corners[3]); + _track_markers_push_back(mn); + delete mn; +} +void MarkerDetectorImpl::SetMarkerSize(double _edge_length, int _res, + double _margin) +{ + edge_length = _edge_length; + res = _res; + margin = _margin; + map_edge_length.clear(); // TODO: Should we clear these here? } + +void MarkerDetectorImpl::SetMarkerSizeForId(unsigned long id, + double _edge_length) +{ + map_edge_length[id] = _edge_length; +} + +void MarkerDetectorImpl::SetOptions(bool _detect_pose_grayscale) +{ + detect_pose_grayscale = _detect_pose_grayscale; +} + +int MarkerDetectorImpl::Detect(cv::Mat& image, Camera* cam, bool track, + bool visualize, double max_new_marker_error, + double max_track_error, + LabelingMethod labeling_method, bool update_pose) +{ + double error = -1; + + // Swap marker tables + _swap_marker_tables(); + _markers_clear(); + + switch (labeling_method) + { + case CVSEQ: + + if (!labeling) + labeling = new LabelingCvSeq(); + ((LabelingCvSeq*)labeling)->SetOptions(detect_pose_grayscale); + break; + } + + labeling->SetCamera(cam); + labeling->LabelSquares(image, visualize); + vector >& blob_corners = labeling->blob_corners; + cv::Mat gray = labeling->gray; + + int orientation; + + // When tracking we find the best matching blob and test if it is near enough? + if (track) + { + for (size_t ii = 0; ii < _track_markers_size(); ii++) + { + Marker* mn = _track_markers_at(ii); + if (mn->GetError(Marker::DECODE_ERROR | Marker::MARGIN_ERROR) > 0) + { + continue; // We track only perfectly decoded markers + } + + int track_i = -1; + int track_orientation = 0; + double track_error = 1e200; + for (unsigned i = 0; i < blob_corners.size() /*blobs_ret.size()*/; ++i) + { + if (blob_corners[i].empty()) + { + continue; + } + mn->CompareCorners(blob_corners[i], &orientation, &error); + + if (error < track_error) + { + track_i = i; + track_orientation = orientation; + track_error = error; + } + } + if (track_error <= max_track_error) + { + mn->SetError(Marker::DECODE_ERROR, 0); + mn->SetError(Marker::MARGIN_ERROR, 0); + mn->SetError(Marker::TRACK_ERROR, track_error); + mn->UpdateContent(blob_corners[track_i], gray, + cam); // Maybe should only do this when kinect is + // being used? Don't think it hurts anything... + mn->UpdatePose(blob_corners[track_i], cam, track_orientation, + update_pose); + _markers_push_back(mn); + blob_corners[track_i].clear(); // We don't want to handle this again... + if (visualize) + mn->Visualize(image, cam, CV_RGB(255, 255, 0)); + } + } + } + + // Now we go through the rest of the blobs -- in case there are new markers... 
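+  // Detection is two-phase: the tracking pass above re-associates blobs with
+  // previously decoded markers via CompareCorners() against max_track_error,
+  // while the pass below runs a full decode on the remaining blobs and keeps
+  // a candidate only if its combined margin/decode error stays within
+  // max_new_marker_error.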
+ for (size_t i = 0; i < blob_corners.size(); ++i) + { + if (blob_corners[i].empty()) + { + continue; + } + + Marker* mn = new_M(edge_length, res, margin); + bool ub = mn->UpdateContent(blob_corners[i], gray, cam); + bool db = mn->DecodeContent(&orientation); + if (ub && db && + (mn->GetError(Marker::MARGIN_ERROR | Marker::DECODE_ERROR) <= + max_new_marker_error)) + { + if (map_edge_length.find(mn->GetId()) != map_edge_length.end()) + { + mn->SetMarkerSize(map_edge_length[mn->GetId()], res, margin); + } + mn->UpdatePose(blob_corners[i], cam, orientation, update_pose); + mn->ros_orientation = orientation; + _markers_push_back(mn); + + if (visualize) + { + mn->Visualize(image, cam, CV_RGB(255, 0, 0)); + } + } + + delete mn; + } + + return (int)_markers_size(); +} + +int MarkerDetectorImpl::DetectAdditional(cv::Mat& image, Camera* cam, + bool visualize, double max_track_error) +{ + if (!labeling) + return -1; + double error = -1; + int orientation; + int count = 0; + vector >& blob_corners = labeling->blob_corners; + + for (size_t ii = 0; ii < _track_markers_size(); ii++) + { + Marker* mn = _track_markers_at(ii); + if (mn->GetError(Marker::DECODE_ERROR | Marker::MARGIN_ERROR) > 0) + continue; // We track only perfectly decoded markers + int track_i = -1; + int track_orientation = 0; + double track_error = 1e200; + for (unsigned i = 0; i < blob_corners.size(); ++i) + { + if (blob_corners[i].empty()) + continue; + mn->CompareCorners(blob_corners[i], &orientation, &error); + if (error < track_error) + { + track_i = i; + track_orientation = orientation; + track_error = error; + } + } + if (track_error <= max_track_error) + { + mn->SetError(Marker::DECODE_ERROR, 0); + mn->SetError(Marker::MARGIN_ERROR, 0); + mn->SetError(Marker::TRACK_ERROR, track_error); + mn->UpdatePose(blob_corners[track_i], cam, track_orientation); + _markers_push_back(mn); + count++; + blob_corners[track_i].clear(); // We don't want to handle this again... 
+ + if (visualize) + { + mn->Visualize(image, cam, CV_RGB(0, 255, 255)); + } + } + } + return count; +} + +} // namespace alvar diff --git a/ar_track_alvar/src/MultiMarker.cpp b/ar_track_alvar/src/MultiMarker.cpp index 9aa389f..ca9a1c1 100644 --- a/ar_track_alvar/src/MultiMarker.cpp +++ b/ar_track_alvar/src/MultiMarker.cpp @@ -28,348 +28,409 @@ using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -int MultiMarker::pointcloud_index(int marker_id, int marker_corner, bool add_if_missing /*=false*/) { - return (get_id_index(marker_id ,add_if_missing)*4)+marker_corner; +int MultiMarker::pointcloud_index(int marker_id, int marker_corner, + bool add_if_missing /*=false*/) +{ + return (get_id_index(marker_id, add_if_missing) * 4) + marker_corner; } int MultiMarker::get_id_index(int id, bool add_if_missing /*=false*/) { - for(size_t i = 0; i < marker_indices.size(); ++i) { - if(marker_indices.at(i) == id) - return (int) i; - } - if (!add_if_missing) return -1; - marker_indices.push_back(id); - marker_status.push_back(0); - return (marker_indices.size()-1); + for (size_t i = 0; i < marker_indices.size(); ++i) + { + if (marker_indices.at(i) == id) + return (int)i; + } + if (!add_if_missing) + return -1; + marker_indices.push_back(id); + marker_status.push_back(0); + return (marker_indices.size() - 1); } void MultiMarker::Reset() { - fill(marker_status.begin(), marker_status.end(), 0); - pointcloud.clear(); + fill(marker_status.begin(), marker_status.end(), 0); + pointcloud.clear(); } -bool MultiMarker::SaveXML(const char* fname) { - TiXmlDocument document; - document.LinkEndChild(new TiXmlDeclaration("1.0", "UTF-8", "no")); - document.LinkEndChild(new TiXmlElement("multimarker")); - TiXmlElement *xml_root = document.RootElement(); - - int n_markers = marker_indices.size(); - xml_root->SetAttribute("markers", n_markers); - - for(int i = 0; i < n_markers; ++i) { - TiXmlElement *xml_marker = new TiXmlElement("marker"); - xml_root->LinkEndChild(xml_marker); - - xml_marker->SetAttribute("index", marker_indices[i]); - xml_marker->SetAttribute("status", marker_status[i]); - - for(int j = 0; j < 4; ++j) { - TiXmlElement *xml_corner = new TiXmlElement("corner"); - xml_marker->LinkEndChild(xml_corner); - CvPoint3D64f X = pointcloud[pointcloud_index(marker_indices[i], j)]; - xml_corner->SetDoubleAttribute("x", X.x); - xml_corner->SetDoubleAttribute("y", X.y); - xml_corner->SetDoubleAttribute("z", X.z); - } - } - return document.SaveFile(fname); +bool MultiMarker::SaveXML(const char* fname) +{ + TiXmlDocument document; + document.LinkEndChild(new TiXmlDeclaration("1.0", "UTF-8", "no")); + document.LinkEndChild(new TiXmlElement("multimarker")); + TiXmlElement* xml_root = document.RootElement(); + + int n_markers = marker_indices.size(); + xml_root->SetAttribute("markers", n_markers); + + for (int i = 0; i < n_markers; ++i) + { + TiXmlElement* xml_marker = new TiXmlElement("marker"); + xml_root->LinkEndChild(xml_marker); + + xml_marker->SetAttribute("index", marker_indices[i]); + xml_marker->SetAttribute("status", marker_status[i]); + + for (int j = 0; j < 4; ++j) + { + TiXmlElement* xml_corner = new TiXmlElement("corner"); + xml_marker->LinkEndChild(xml_corner); + cv::Point3d X = pointcloud[pointcloud_index(marker_indices[i], j)]; + xml_corner->SetDoubleAttribute("x", X.x); + xml_corner->SetDoubleAttribute("y", X.y); + xml_corner->SetDoubleAttribute("z", X.z); + } + } + return document.SaveFile(fname); } -bool MultiMarker::SaveText(const char* fname) { - size_t n_markers = 
marker_indices.size(); - - fstream file_op(fname, ios::out); +bool MultiMarker::SaveText(const char* fname) +{ + size_t n_markers = marker_indices.size(); - file_op<QueryIntAttribute("markers", &n_markers) != TIXML_SUCCESS) return false; - - pointcloud.clear(); - marker_indices.resize(n_markers); - marker_status.resize(n_markers); - - TiXmlElement *xml_marker = xml_root->FirstChildElement("marker"); - for(int i = 0; i < n_markers; ++i) { - if (!xml_marker) return false; - - int index, status; - if (xml_marker->QueryIntAttribute("index", &index) != TIXML_SUCCESS) return false; - if (xml_marker->QueryIntAttribute("status", &status) != TIXML_SUCCESS) return false; - marker_indices[i] = index; - marker_status[i] = status; - if(i==0) master_id = index; - - TiXmlElement *xml_corner = xml_marker->FirstChildElement("corner"); - for(int j = 0; j < 4; ++j) { - if (!xml_corner) return false; - - CvPoint3D64f X; - if (xml_corner->QueryDoubleAttribute("x", &X.x) != TIXML_SUCCESS) return false; - if (xml_corner->QueryDoubleAttribute("y", &X.y) != TIXML_SUCCESS) return false; - if (xml_corner->QueryDoubleAttribute("z", &X.z) != TIXML_SUCCESS) return false; - pointcloud[pointcloud_index(marker_indices[i], j)] = X; - - xml_corner = (TiXmlElement*)xml_corner->NextSibling("corner"); - } - - xml_marker = (TiXmlElement*)xml_marker->NextSibling("marker"); - } - return true; +bool MultiMarker::Save(const char* fname, FILE_FORMAT format) +{ + switch (format) + { + case FILE_FORMAT_XML: + return SaveXML(fname); + case FILE_FORMAT_TEXT: + case FILE_FORMAT_DEFAULT: + return SaveText(fname); + default: + return false; + } } -bool MultiMarker::LoadText(const char* fname) { - fstream file_op(fname, ios::in); - - if (!file_op) { +bool MultiMarker::LoadXML(const char* fname) +{ + TiXmlDocument document; + if (!document.LoadFile(fname)) + return false; + TiXmlElement* xml_root = document.RootElement(); + + int n_markers; + if (xml_root->QueryIntAttribute("markers", &n_markers) != TIXML_SUCCESS) + return false; + + pointcloud.clear(); + marker_indices.resize(n_markers); + marker_status.resize(n_markers); + + TiXmlElement* xml_marker = xml_root->FirstChildElement("marker"); + for (int i = 0; i < n_markers; ++i) + { + if (!xml_marker) + return false; + + int index, status; + if (xml_marker->QueryIntAttribute("index", &index) != TIXML_SUCCESS) + return false; + if (xml_marker->QueryIntAttribute("status", &status) != TIXML_SUCCESS) + return false; + marker_indices[i] = index; + marker_status[i] = status; + if (i == 0) + master_id = index; + + TiXmlElement* xml_corner = xml_marker->FirstChildElement("corner"); + for (int j = 0; j < 4; ++j) + { + if (!xml_corner) return false; - } - - size_t n_markers; - file_op>>n_markers; - pointcloud.clear(); - marker_indices.resize(n_markers); - marker_status.resize(n_markers); + cv::Point3d X; + if (xml_corner->QueryDoubleAttribute("x", &X.x) != TIXML_SUCCESS) + return false; + if (xml_corner->QueryDoubleAttribute("y", &X.y) != TIXML_SUCCESS) + return false; + if (xml_corner->QueryDoubleAttribute("z", &X.z) != TIXML_SUCCESS) + return false; + pointcloud[pointcloud_index(marker_indices[i], j)] = X; - for(size_t i = 0; i < n_markers; ++i){ - file_op>>marker_indices[i]; - } + xml_corner = (TiXmlElement*)xml_corner->NextSibling("corner"); + } - for(size_t i = 0; i < n_markers; ++i){ - file_op>>marker_status[i]; - } + xml_marker = (TiXmlElement*)xml_marker->NextSibling("marker"); + } + return true; +} - for(size_t i = 0; i < n_markers; ++i) - for(size_t j = 0; j < 4; ++j) - { - CvPoint3D64f X; - 
file_op>>X.x; - file_op>>X.y; - file_op>>X.z; - pointcloud[pointcloud_index(marker_indices[i], j)] = X; - } +bool MultiMarker::LoadText(const char* fname) +{ + fstream file_op(fname, ios::in); + + if (!file_op) + { + return false; + } + + size_t n_markers; + file_op >> n_markers; + + pointcloud.clear(); + marker_indices.resize(n_markers); + marker_status.resize(n_markers); + + for (size_t i = 0; i < n_markers; ++i) + { + file_op >> marker_indices[i]; + } + + for (size_t i = 0; i < n_markers; ++i) + { + file_op >> marker_status[i]; + } + + for (size_t i = 0; i < n_markers; ++i) + for (size_t j = 0; j < 4; ++j) + { + cv::Point3d X; + file_op >> X.x; + file_op >> X.y; + file_op >> X.z; + pointcloud[pointcloud_index(marker_indices[i], j)] = X; + } - file_op.close(); + file_op.close(); - return true; + return true; } -bool MultiMarker::Load(const char* fname, FILE_FORMAT format) { - switch (format) { - case FILE_FORMAT_XML: - return LoadXML(fname); - case FILE_FORMAT_TEXT: - case FILE_FORMAT_DEFAULT: - return LoadText(fname); - default: - return false; - } +bool MultiMarker::Load(const char* fname, FILE_FORMAT format) +{ + switch (format) + { + case FILE_FORMAT_XML: + return LoadXML(fname); + case FILE_FORMAT_TEXT: + case FILE_FORMAT_DEFAULT: + return LoadText(fname); + default: + return false; + } } MultiMarker::MultiMarker(vector& indices) { - marker_indices.resize(indices.size()); - copy(indices.begin(), indices.end(), marker_indices.begin()); - marker_status.resize(indices.size()); - fill(marker_status.begin(), marker_status.end(), 0); + marker_indices.resize(indices.size()); + copy(indices.begin(), indices.end(), marker_indices.begin()); + marker_status.resize(indices.size()); + fill(marker_status.begin(), marker_status.end(), 0); } -void MultiMarker::PointCloudReset() { - pointcloud.clear(); +void MultiMarker::PointCloudReset() +{ + pointcloud.clear(); } -void MultiMarker::PointCloudCorners3d(double edge_length, Pose &pose, CvPoint3D64f corners[4]) { - // Transformation from origin to current marker - CvMat *m3 = cvCreateMat(4,4,CV_64F); cvSetIdentity(m3); - pose.GetMatrix(m3); - - for(size_t j = 0; j < 4; ++j) - { - // TODO: This should be exactly the same as in Marker class. - // Should we get the values from there somehow? - double X_data[4] = {0, 0, 0, 1}; - if (j == 0) { - int zzzz=2; - //X_data[0] = -0.5*edge_length; - //X_data[1] = -0.5*edge_length; - } else if (j == 1) { - X_data[0] = +0.5*edge_length; - X_data[1] = -0.5*edge_length; - } else if (j == 2) { - X_data[0] = +0.5*edge_length; - X_data[1] = +0.5*edge_length; - } else if (j == 3) { - X_data[0] = -0.5*edge_length; - X_data[1] = +0.5*edge_length; - } - - CvMat X = cvMat(4, 1, CV_64F, X_data); - cvMatMul(m3, &X, &X); - - corners[j].x = X_data[0] / X_data[3]; - corners[j].y = X_data[1] / X_data[3]; - corners[j].z = X_data[2] / X_data[3]; - } - cvReleaseMat(&m3); +void MultiMarker::PointCloudCorners3d(double edge_length, Pose& pose, + cv::Point3d corners[4]) +{ + // Transformation from origin to current marker + cv::Mat m3 = cv::Mat::eye(4, 4, CV_64F); + pose.GetMatrix(m3); + + for (size_t j = 0; j < 4; ++j) + { + // TODO: This should be exactly the same as in Marker class. + // Should we get the values from there somehow? 
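+    // Corner layout: corner 0 stays at the marker origin (its offsets are
+    // the commented-out lines below), while corners 1-3 step around the
+    // square of side edge_length in the marker's x/y plane; each corner is
+    // then lifted into world coordinates through the homogeneous pose
+    // matrix m3.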
+    double X_data[4] = { 0, 0, 0, 1 };
+    if (j == 0)
+    {
+      // corner 0 is left at the marker origin
+      // X_data[0] = -0.5*edge_length;
+      // X_data[1] = -0.5*edge_length;
+    }
+    else if (j == 1)
+    {
+      X_data[0] = +0.5 * edge_length;
+      X_data[1] = -0.5 * edge_length;
+    }
+    else if (j == 2)
+    {
+      X_data[0] = +0.5 * edge_length;
+      X_data[1] = +0.5 * edge_length;
+    }
+    else if (j == 3)
+    {
+      X_data[0] = -0.5 * edge_length;
+      X_data[1] = +0.5 * edge_length;
+    }
+
+    cv::Mat X = cv::Mat(4, 1, CV_64F, X_data);
+    X = m3 * X;  // note: this allocates a new matrix, so the result must be
+                 // read back from X rather than from X_data
+
+    corners[j].x = X.at<double>(0) / X.at<double>(3);
+    corners[j].y = X.at<double>(1) / X.at<double>(3);
+    corners[j].z = X.at<double>(2) / X.at<double>(3);
+  }
+  m3.release();
+}

-void MultiMarker::PointCloudAdd(int marker_id, double edge_length, Pose &pose) {
-  CvPoint3D64f corners[4];
-  PointCloudCorners3d(edge_length, pose, corners);
-  for(size_t j = 0; j < 4; ++j) {
-    pointcloud[pointcloud_index(marker_id, j, true)] = corners[j];
-    marker_status[get_id_index(marker_id, true)]=1;
-  }
-}
+void MultiMarker::PointCloudAdd(int marker_id, double edge_length, Pose& pose)
+{
+  cv::Point3d corners[4];
+  PointCloudCorners3d(edge_length, pose, corners);
+  for (size_t j = 0; j < 4; ++j)
+  {
+    pointcloud[pointcloud_index(marker_id, j, true)] = corners[j];
+    marker_status[get_id_index(marker_id, true)] = 1;
+  }
+}

-void MultiMarker::PointCloudCopy(const MultiMarker *m) {
-  pointcloud.clear();
-  pointcloud = m->pointcloud; // TODO: Is this copy operation ok?
-  marker_indices.resize(m->marker_indices.size());
-  marker_status.resize(m->marker_status.size());
-  copy(m->marker_indices.begin(), m->marker_indices.end(), marker_indices.begin());
-  copy(m->marker_status.begin(), m->marker_status.end(), marker_status.begin());
+void MultiMarker::PointCloudCopy(const MultiMarker* m)
+{
+  pointcloud.clear();
+  pointcloud = m->pointcloud;  // TODO: Is this copy operation ok?
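+  // (Assuming pointcloud is still a std::map keyed by pointcloud_index():
+  // map assignment deep-copies every key/value pair, so the copy above is
+  // independent of the source object.)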
+ marker_indices.resize(m->marker_indices.size()); + marker_status.resize(m->marker_status.size()); + copy(m->marker_indices.begin(), m->marker_indices.end(), + marker_indices.begin()); + copy(m->marker_status.begin(), m->marker_status.end(), marker_status.begin()); } -void MultiMarker::PointCloudGet(int marker_id, int point, - double &x, double &y, double &z) { - CvPoint3D64f p3d = pointcloud[pointcloud_index(marker_id, point)]; +void MultiMarker::PointCloudGet(int marker_id, int point, double& x, double& y, + double& z) +{ + cv::Point3d p3d = pointcloud[pointcloud_index(marker_id, point)]; x = p3d.x; y = p3d.y; z = p3d.z; } -bool MultiMarker::IsValidMarker(int marker_id) { - int idx = get_id_index(marker_id); - return idx != -1 && marker_status[idx] != 0; +bool MultiMarker::IsValidMarker(int marker_id) +{ + int idx = get_id_index(marker_id); + return idx != -1 && marker_status[idx] != 0; } - -double MultiMarker::_GetPose(MarkerIterator &begin, MarkerIterator &end, Camera* cam, Pose& pose, IplImage* image) { - vector world_points; - vector image_points; - - // Reset the marker_status to 1 for all markers in point_cloud - for (size_t i=0; i 0) marker_status[i]=1; - } - - // For every detected marker - for (MarkerIterator &i = begin.reset(); i != end; ++i) - { - const Marker* marker = *i; - int id = marker->GetId(); - int index = get_id_index(id); - if (index < 0) continue; - - // But only if we have corresponding points in the pointcloud - if (marker_status[index] > 0) { - for(size_t j = 0; j < marker->marker_corners.size(); ++j) - { - CvPoint3D64f Xnew = pointcloud[pointcloud_index(id, (int)j)]; - world_points.push_back(Xnew); - image_points.push_back(marker->marker_corners_img.at(j)); - if (image) cvCircle(image, cvPoint(int(marker->marker_corners_img[j].x), int(marker->marker_corners_img[j].y)), 3, CV_RGB(0,255,0)); - } - marker_status[index] = 2; // Used for tracking - } - } - - if (world_points.size() < 4) return -1; - - double rod[3], tra[3]; - CvMat rot_mat = cvMat(3, 1,CV_64F, rod); - CvMat tra_mat = cvMat(3, 1,CV_64F, tra); - double error=0; // TODO: Now we don't calculate any error value - cam->CalcExteriorOrientation(world_points, image_points, &rot_mat, &tra_mat); - pose.SetRodriques(&rot_mat); - pose.SetTranslation(&tra_mat); - return error; +double MultiMarker::_GetPose(MarkerIterator& begin, MarkerIterator& end, + Camera* cam, Pose& pose, cv::Mat& image) +{ + vector world_points; + vector image_points; + + // Reset the marker_status to 1 for all markers in point_cloud + for (size_t i = 0; i < marker_status.size(); i++) + { + if (marker_status[i] > 0) + marker_status[i] = 1; + } + + // For every detected marker + for (MarkerIterator& i = begin.reset(); i != end; ++i) + { + const Marker* marker = *i; + int id = marker->GetId(); + int index = get_id_index(id); + if (index < 0) + continue; + + // But only if we have corresponding points in the pointcloud + if (marker_status[index] > 0) + { + for (size_t j = 0; j < marker->marker_corners.size(); ++j) + { + world_points.push_back(pointcloud[pointcloud_index(id, (int)j)]); + image_points.push_back(marker->marker_corners_img.at(j)); + if (!image.empty()) + cv::circle(image, + cv::Point(int(marker->marker_corners_img[j].x), + int(marker->marker_corners_img[j].y)), + 3, CV_RGB(0, 255, 0)); + } + marker_status[index] = 2; // Used for tracking + } + } + + if (world_points.size() < 4) + return -1; + + cv::Mat rot_mat = cv::Mat(3, 1, CV_64F); + cv::Mat tra_mat = cv::Mat(3, 1, CV_64F); + double error = 0; // TODO: Now we don't calculate 
any error value + cam->CalcExteriorOrientation(world_points, image_points, rot_mat, tra_mat); + pose.SetRodriques(rot_mat); + pose.SetTranslation(tra_mat); + return error; } - -int MultiMarker::_SetTrackMarkers(MarkerDetectorImpl &marker_detector, Camera* cam, Pose& pose, IplImage *image) { - int count=0; - marker_detector.TrackMarkersReset(); - for(size_t i = 0; i < marker_indices.size(); ++i) { - int id = marker_indices[i]; - // If the marker wasn't tracked lets add it to be trackable - if (marker_status[i] == 1) { - vector pw(4); - pw[0] = pointcloud[pointcloud_index(id, 0)]; - pw[1] = pointcloud[pointcloud_index(id, 1)]; - pw[2] = pointcloud[pointcloud_index(id, 2)]; - pw[3] = pointcloud[pointcloud_index(id, 3)]; - vector pi(4); - cam->ProjectPoints(pw, &pose, pi); - PointDouble p[4]; // TODO: This type copying is so silly!!! - p[0].x = pi[0].x; - p[0].y = pi[0].y; - p[1].x = pi[1].x; - p[1].y = pi[1].y; - p[2].x = pi[2].x; - p[2].y = pi[2].y; - p[3].x = pi[3].x; - p[3].y = pi[3].y; - if (image) { - cvLine(image, cvPoint(int(p[0].x), int(p[0].y)), cvPoint(int(p[1].x), int(p[1].y)), CV_RGB(255,0,0)); - cvLine(image, cvPoint(int(p[1].x), int(p[1].y)), cvPoint(int(p[2].x), int(p[2].y)), CV_RGB(255,0,0)); - cvLine(image, cvPoint(int(p[2].x), int(p[2].y)), cvPoint(int(p[3].x), int(p[3].y)), CV_RGB(255,0,0)); - cvLine(image, cvPoint(int(p[3].x), int(p[3].y)), cvPoint(int(p[0].x), int(p[0].y)), CV_RGB(255,0,0)); - } - marker_detector.TrackMarkerAdd(id, p); - count++; - } - } - return count; +int MultiMarker::_SetTrackMarkers(MarkerDetectorImpl& marker_detector, + Camera* cam, Pose& pose, cv::Mat& image) +{ + int count = 0; + marker_detector.TrackMarkersReset(); + for (size_t i = 0; i < marker_indices.size(); ++i) + { + int id = marker_indices[i]; + // If the marker wasn't tracked lets add it to be trackable + if (marker_status[i] == 1) + { + vector pw(4); + pw[0] = pointcloud[pointcloud_index(id, 0)]; + pw[1] = pointcloud[pointcloud_index(id, 1)]; + pw[2] = pointcloud[pointcloud_index(id, 2)]; + pw[3] = pointcloud[pointcloud_index(id, 3)]; + vector pi(4); + cam->ProjectPoints(pw, &pose, pi); + PointDouble p[4]; // TODO: This type copying is so silly!!! 
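+      // Seed the tracker with the bundle's known geometry: the stored 3-D
+      // corners were projected into the image with the current bundle pose,
+      // so markers the detector lost this frame can still be matched by
+      // corner position.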
+ p[0].x = pi[0].x; + p[0].y = pi[0].y; + p[1].x = pi[1].x; + p[1].y = pi[1].y; + p[2].x = pi[2].x; + p[2].y = pi[2].y; + p[3].x = pi[3].x; + p[3].y = pi[3].y; + if (!image.empty()) + { + cv::line(image, cv::Point(int(p[0].x), int(p[0].y)), + cv::Point(int(p[1].x), int(p[1].y)), CV_RGB(255, 0, 0)); + cv::line(image, cv::Point(int(p[1].x), int(p[1].y)), + cv::Point(int(p[2].x), int(p[2].y)), CV_RGB(255, 0, 0)); + cv::line(image, cv::Point(int(p[2].x), int(p[2].y)), + cv::Point(int(p[3].x), int(p[3].y)), CV_RGB(255, 0, 0)); + cv::line(image, cv::Point(int(p[3].x), int(p[3].y)), + cv::Point(int(p[0].x), int(p[0].y)), CV_RGB(255, 0, 0)); + } + marker_detector.TrackMarkerAdd(id, p); + count++; + } + } + return count; } -} +} // namespace alvar diff --git a/ar_track_alvar/src/MultiMarkerBundle.cpp b/ar_track_alvar/src/MultiMarkerBundle.cpp index 680be3b..2e5f563 100644 --- a/ar_track_alvar/src/MultiMarkerBundle.cpp +++ b/ar_track_alvar/src/MultiMarkerBundle.cpp @@ -22,252 +22,291 @@ */ #include "ar_track_alvar/MultiMarkerBundle.h" +#include using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -MultiMarkerBundle::MultiMarkerBundle(std::vector& indices) - : MultiMarker(indices) +MultiMarkerBundle::MultiMarkerBundle(std::vector& indices) + : MultiMarker(indices) { - MeasurementsReset(); + MeasurementsReset(); } MultiMarkerBundle::~MultiMarkerBundle() { } -void MultiMarkerBundle::MeasurementsReset() { - optimization_error=-1; - optimization_keyframes=0; - optimization_markers=0; - optimizing=false; - camera_poses.clear(); - measurements.clear(); +void MultiMarkerBundle::MeasurementsReset() +{ + optimization_error = -1; + optimization_keyframes = 0; + optimization_markers = 0; + optimizing = false; + camera_poses.clear(); + measurements.clear(); } -int n_images; // TODO: This should not be global (use the param instead) -int n_markers; // TODO: This should not be global (use the param instead) -Camera *camera; // TODO: This should not be global (use the param instead) +int n_images; // TODO: This should not be global (use the param instead) +int n_markers; // TODO: This should not be global (use the param instead) +Camera* camera; // TODO: This should not be global (use the param instead) -void Est(CvMat* state, CvMat* estimation, void *param) +void Est(cv::Mat& state, cv::Mat& estimation, void* param) { + // State: cam1, cam2, cam3, cam4, ..., X1(x,y,z), X2, X3, ... + // Estimation: (u11,v11), (u) - // State: cam1, cam2, cam3, cam4, ..., X1(x,y,z), X2, X3, ... - // Estimation: (u11,v11), (u) - - // For every image observation (camera)... - for(int i = 0; i < n_images; ++i) - { - // Get camera from state - Pose p; p.SetQuaternion(&(state->data.db[i*7+3])); + // For every image observation (camera)... 
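+  // State vector layout: the first n_images*7 entries hold one camera pose
+  // per keyframe (3 translation components followed by a 4-component
+  // quaternion), and after them come 3 doubles (x, y, z) for each of the
+  // n_markers*4 marker corners. The estimation vector packs the projected
+  // (u, v) of every corner in every keyframe in the same order.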
+ for (int i = 0; i < n_images; ++i) + { + // Get camera from state + Pose p; + p.SetQuaternion(&state.at(i * 7 + 3)); - double tra[3]; - double rodr[3]; - CvMat mat_translation_vector = cvMat(3, 1, CV_64F, tra); - CvMat mat_rotation_vector = cvMat(3, 1, CV_64F, rodr); + double tra[3]; + double rodr[3]; + cv::Mat mat_translation_vector = cv::Mat(3, 1, CV_64F, tra); + cv::Mat mat_rotation_vector = cv::Mat(3, 1, CV_64F, rodr); - memcpy(tra, &(state->data.db[i*7]), 3*sizeof(double)); - p.GetRodriques(&mat_rotation_vector); + memcpy(tra, &state.at(i * 7), 3 * sizeof(double)); + p.GetRodriques(mat_rotation_vector); - // For every point in marker field - int n_points = n_markers*4; - for(int j = 0; j < n_points; ++j) - { - int index = n_images*7 + 3*j; + // For every point in marker field + int n_points = n_markers * 4; + for (int j = 0; j < n_points; ++j) + { + int index = n_images * 7 + 3 * j; - double object_points[3] = {state->data.db[index+0], - state->data.db[index+1], - state->data.db[index+2]}; + double object_points[3] = { state.at(index + 0), + state.at(index + 1), + state.at(index + 2) }; + cv::Mat mat_object_points; + mat_object_points = cv::Mat(1, 1, CV_64FC3, object_points); - CvMat mat_object_points; - cvInitMatHeader(&mat_object_points, 1, 1, CV_64FC3, object_points); + double proj[2] = { 0 }; + cv::Mat mat_proj = cv::Mat(1, 1, CV_64FC2, proj); - double proj[2]={0}; - CvMat mat_proj = cvMat(1, 1, CV_64FC2, proj); + cv::projectPoints(mat_object_points, mat_rotation_vector, + mat_translation_vector, camera->calib_K, + camera->calib_D, mat_proj); - cvProjectPoints2(&mat_object_points, &mat_rotation_vector, - &mat_translation_vector, &(camera->calib_K), - &(camera->calib_D), &mat_proj); - - index = i*n_points*2 + j*2; - estimation->data.db[index+0] = proj[0]; - estimation->data.db[index+1] = proj[1]; - } - } + index = i * n_points * 2 + j * 2; + estimation.at(index + 0) = proj[0]; + estimation.at(index + 1) = proj[1]; + } + } } -bool MultiMarkerBundle::Optimize(Camera *_cam, double stop, int max_iter, Optimization::OptimizeMethod method) +bool MultiMarkerBundle::Optimize(Camera* _cam, double stop, int max_iter, + Optimization::OptimizeMethod method) { - // Est() needs these - // TODO: Better way to deliver them??? Other than using global variables??? - camera = _cam; - n_images = camera_poses.size(); - n_markers = marker_indices.size(); + // Est() needs these + // TODO: Better way to deliver them??? Other than using global variables??? + camera = _cam; + n_images = camera_poses.size(); + n_markers = marker_indices.size(); + + if (n_images < 1) + { + cout << "Too few images! At least 1 images needed." << endl; + return false; + } - if(n_images < 1) - { - cout<<"Too few images! At least 1 images needed."< 0) { - cvmSet(parameters_mat, index+0, 0, pointcloud[pointcloud_index(id,j)].x); - cvmSet(parameters_mat, index+1, 0, pointcloud[pointcloud_index(id,j)].y); - cvmSet(parameters_mat, index+2, 0, pointcloud[pointcloud_index(id,j)].z); - } else { - // We don't optimize known-initialized parameters? - cvSet2D(parameters_mask_mat, index+0, 0, cvScalar(0)); - cvSet2D(parameters_mask_mat, index+1, 0, cvScalar(0)); - cvSet2D(parameters_mask_mat, index+2, 0, cvScalar(0)); - } - } - } - // Fill in the frame data. Camera poses into parameters and corners into measurements - int n_measurements = 0; // number of actual measurement data points. 
- for (size_t f=0; f < frames; f++) { - //cout<<"frame "<data.db[f*7+0])); - CvMat qua = cvMat(4, 1, CV_64F, &(parameters_mat->data.db[f*7+3])); - camera_poses[f].GetTranslation(&tra); - camera_poses[f].GetQuaternion(&qua); - // Measurements - for(size_t i = 0; i < marker_indices.size(); ++i) { - int id = marker_indices[i]; - if (measurements.find(measurements_index(f,id,0)) != measurements.end()) { - for (int j=0; j<4; j++) { - //cout< 0) optimization_markers++; - Optimization optimization(n_params, n_meas); - cout<<"Optimizing with "< 3) && (optimization_error > stop)) { - CvMat *err = optimization.GetErr(); - int max_k=-1; - double max=0; - for (int k=0; kheight; k++) { - if (cvmGet(err, k, 0) > max) { - max = cvmGet(err, k, 0); - max_k = k; - } - } - if (max_k >= 0) { - // We remove all 8 corner measurements - int f = (max_k - (max_k % (n_markers*4*2))) / (n_markers*4*2); - max_k -= f*(n_markers*4*2); - int id = (max_k - (max_k % (4*2))) / (4*2); - cout<<"Optimization error over the threshold -- remove the biggest outlier: "; - cout<<"frame "<(index + 0, 0) = 0; + parameters_mask_mat.at(index + 1, 0) = 0; + parameters_mask_mat.at(index + 2, 0) = 0; + } + if (marker_status[i] > 0) + { + parameters_mat.at(index + 0, 0) = + pointcloud[pointcloud_index(id, j)].x; + parameters_mat.at(index + 1, 0) = + pointcloud[pointcloud_index(id, j)].y; + parameters_mat.at(index + 2, 0) = + pointcloud[pointcloud_index(id, j)].z; + } + else + { + // We don't optimize known-initialized parameters? + parameters_mask_mat.at(index + 0, 0) = 0; + parameters_mask_mat.at(index + 1, 0) = 0; + parameters_mask_mat.at(index + 2, 0) = 0; + } + } + } + // Fill in the frame data. Camera poses into parameters and corners into + // measurements + int n_measurements = 0; // number of actual measurement data points. 
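+  // Measurement layout mirrors Est(): frame f contributes a block of
+  // n_markers*4*2 values, and corner j of the i:th marker lands at
+  // f*(n_markers*4*2) + i*(4*2) + j*2 as a (u, v) pair. Unobserved markers
+  // keep their slots, but their weights are zeroed below so they do not
+  // pull on the optimization.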
+ for (size_t f = 0; f < frames; f++) + { + // cout<<"frame "<(f * 7 + 0))); + cv::Mat qua = + cv::Mat(4, 1, CV_64F, &(parameters_mat.at(f * 7 + 3))); + camera_poses[f].GetTranslation(tra); + camera_poses[f].GetQuaternion(qua); + // Measurements + for (size_t i = 0; i < marker_indices.size(); ++i) + { + int id = marker_indices[i]; + if (measurements.find(measurements_index(f, id, 0)) != measurements.end()) + { + for (int j = 0; j < 4; j++) + { + // cout<(index + 0, 0) = + measurements[measurements_index(f, id, j)].x; + measurements_mat.at(index + 1, 0) = + measurements[measurements_index(f, id, j)].y; + n_measurements += 2; + } + } + else + { + for (int j = 0; j < 4; j++) + { + // hop int index = f*(n_markers*4*2) + id*(4*2) + j*2; + int index = f * (n_markers * 4 * 2) + i * (4 * 2) + j * 2; + weight_mat.at(index + 0, 0) = 0.0; + weight_mat.at(index + 1, 0) = 0.0; + } + } + } + } + // out_matrix(measurements_mat, "measurements"); + // out_matrix(parameters_mat, "parameters"); + optimization_keyframes = n_images; + optimization_markers = 0; + for (size_t i = 0; i < marker_indices.size(); ++i) + if (marker_status[i] > 0) + optimization_markers++; + Optimization optimization(n_params, n_meas); + cout << "Optimizing with " << optimization_keyframes << " keyframes and " + << optimization_markers << " markers" << endl; + optimization_error = optimization.Optimize( + parameters_mat, measurements_mat, stop, max_iter, Est, 0, method, + parameters_mask_mat, cv::Mat(), weight_mat); + optimization_error /= n_measurements; + cout << "Optimization error per corner: " << optimization_error << endl; + /* + if ((frames > 3) && (optimization_error > stop)) { + cv::Mat *err = optimization.GetErr(); + int max_k=-1; + double max=0; + for (int k=0; kheight; k++) { + if (cvmGet(err, k, 0) > max) { + max = cvmGet(err, k, 0); + max_k = k; + } + } + if (max_k >= 0) { + // We remove all 8 corner measurements + int f = (max_k - (max_k % (n_markers*4*2))) / (n_markers*4*2); + max_k -= f*(n_markers*4*2); + int id = (max_k - (max_k % (4*2))) / (4*2); + cout<<"Optimization error over the threshold -- remove the biggest + outlier: "; cout<<"frame "<(index + 0, 0); + pointcloud[pointcloud_index(id, j)].y = + parameters_mat.at(index + 1, 0); + pointcloud[pointcloud_index(id, j)].z = + parameters_mat.at(index + 2, 0); + } + } - cvReleaseMat(¶meters_mat); - cvReleaseMat(¶meters_mask_mat); - cvReleaseMat(&measurements_mat); + parameters_mat.release(); + parameters_mask_mat.release(); + measurements_mat.release(); - optimizing = false; - return true; + optimizing = false; + return true; } -void MultiMarkerBundle::_MeasurementsAdd(MarkerIterator &begin, MarkerIterator &end, const Pose& camera_pose) { - camera_poses.push_back(camera_pose); - int frame_no = camera_poses.size()-1; - //cout<<"Adding measurements for frame "<GetId(); - int index = get_id_index(id); - if (index < 0) continue; - //cout<<"Id "<marker_corners_img[j]; - //cout<at(i).marker_corners_img[j].x<<" "; - } - //cout<GetId(); + int index = get_id_index(id); + if (index < 0) + continue; + // cout<<"Id "<marker_corners_img[j]; + // cout<at(i).marker_corners_img[j].x<<" "; + } + // cout<& indices) - : MultiMarker(indices) + : MultiMarker(indices) { - pointcloud_filtered = new FilterMedian[indices.size()*4*3]; - for (size_t i=0; i= filter_buffer_max) { - marker_status[get_id_index(marker_id)]=1; - } - } - } +void MultiMarkerFiltered::PointCloudAverage(int marker_id, double edge_length, + Pose& pose) +{ + if (marker_id == 0) + { + if 
(marker_status[get_id_index(marker_id)] == 0)
+      PointCloudAdd(marker_id, edge_length, pose);
+  }
+  else
+  {
+    cv::Point3d corners[4];
+    PointCloudCorners3d(edge_length, pose, corners);
+    for (size_t j = 0; j < 4; ++j)
+    {
+      int id_index = get_id_index(marker_id);
+      if (id_index < 0)
+        continue;
+      int index = id_index * 4 * 3 + j * 3;
+      cv::Point3d p;
+      p.x = pointcloud_filtered[index + 0].next(corners[j].x);
+      p.y = pointcloud_filtered[index + 1].next(corners[j].y);
+      p.z = pointcloud_filtered[index + 2].next(corners[j].z);
+      pointcloud[pointcloud_index(marker_id, j)] = p;
+      // The marker isn't used for pose calculation until we are quite sure
+      // about it ???
+      if (pointcloud_filtered[index + 0].getCurrentSize() >= filter_buffer_max)
+      {
+        marker_status[get_id_index(marker_id)] = 1;
+      }
+    }
+  }
+}

-double MultiMarkerFiltered::_Update(MarkerIterator &begin, MarkerIterator &end,
-                                    Camera* cam, Pose& pose, IplImage* image) {
-  if (_GetPose(begin, end, cam, pose, NULL) == -1) return -1;
+double MultiMarkerFiltered::_Update(MarkerIterator& begin, MarkerIterator& end,
+                                    Camera* cam, Pose& pose, cv::Mat& image)
+{
+  if (_GetPose(begin, end, cam, pose) == -1)
+    return -1;

-  // For every detected marker
-  for (MarkerIterator &i = begin.reset(); i != end; ++i)
-  {
-    const Marker* marker = *i;
-    int id = marker->GetId();
-    int index = get_id_index(id);
-    if (index < 0) continue;
+  // For every detected marker
+  for (MarkerIterator& i = begin.reset(); i != end; ++i)
+  {
+    const Marker* marker = *i;
+    int id = marker->GetId();
+    int index = get_id_index(id);
+    if (index < 0)
+      continue;

-    // Initialize marker pose
-    if (marker_status[index] == 0)
-    {
-      double cam_posed[16];
-      double mar_posed[16];
+    // Initialize marker pose
+    if (marker_status[index] == 0)
+    {
+      double cam_posed[16];
+      double mar_posed[16];

-      CvMat cam_mat = cvMat(4, 4, CV_64F, cam_posed);
-      CvMat mar_mat = cvMat(4, 4, CV_64F, mar_posed);
+      cv::Mat cam_mat = cv::Mat(4, 4, CV_64F, cam_posed);
+      cv::Mat mar_mat = cv::Mat(4, 4, CV_64F, mar_posed);

-      pose.GetMatrix(&cam_mat);
-      marker->pose.GetMatrix(&mar_mat);
+      pose.GetMatrix(cam_mat);
+      marker->pose.GetMatrix(mar_mat);

-      cvInvert(&cam_mat, &cam_mat);
-      cvMatMul(&cam_mat, &mar_mat, &mar_mat);
+      cam_mat = cam_mat.inv();
+      mar_mat = cam_mat * mar_mat;

-      Pose p;
-      p.SetMatrix(&mar_mat);
-      PointCloudAverage(id, marker->GetMarkerEdgeLength(), p);
-    }
-  }
+      Pose p;
+      p.SetMatrix(mar_mat);
+      PointCloudAverage(id, marker->GetMarkerEdgeLength(), p);
+    }
+  }

-  // When the pointcloud is updated we will get the new "better" pose...
-  return _GetPose(begin, end, cam, pose, image);
+  // When the pointcloud is updated we will get the new "better" pose...
+  return _GetPose(begin, end, cam, pose, image);
+}

-} // namespace alvar
+}  // namespace alvar
diff --git a/ar_track_alvar/src/MultiMarkerInitializer.cpp b/ar_track_alvar/src/MultiMarkerInitializer.cpp
index a87b294..ddb02c6 100644
--- a/ar_track_alvar/src/MultiMarkerInitializer.cpp
+++ b/ar_track_alvar/src/MultiMarkerInitializer.cpp
@@ -26,133 +26,169 @@
 using namespace std;

-namespace alvar {
+namespace alvar
+{
 using namespace std;

-MultiMarkerInitializer::MultiMarkerInitializer(std::vector<int>& indices, int _filter_buffer_min, int _filter_buffer_max)
-  : MultiMarker(indices), filter_buffer_min(_filter_buffer_min) {
+MultiMarkerInitializer::MultiMarkerInitializer(std::vector<int>& indices,
+                                               int _filter_buffer_min,
+                                               int _filter_buffer_max)
+  : MultiMarker(indices), filter_buffer_min(_filter_buffer_min)
+{
+  marker_detected.resize(indices.size());
+  pointcloud_filtered = new FilterMedian[indices.size() * 4 * 3];
+  for (size_t i = 0; i < indices.size() * 4 * 3; i++)
+  {
+    pointcloud_filtered[i].setWindowSize(_filter_buffer_max);
+  }
+}

-  marker_detected.resize(indices.size());
-  pointcloud_filtered = new FilterMedian[indices.size()*4*3];
-  for (size_t i=0; i<indices.size()*4*3; i++) {
-    pointcloud_filtered[i].setWindowSize(_filter_buffer_max);
-  }
-}

-void MultiMarkerInitializer::MeasurementsAdd(MarkerIterator &begin, MarkerIterator &end) {
-  // copy markers into measurements.
-  vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> > new_measurements;
-  for (MarkerIterator &i = begin.reset(); i != end; ++i) {
-    const Marker* marker = *i;
-    int index = get_id_index(marker->GetId());
-    if (index == -1) continue;
-    MarkerMeasurement m;
-    m.SetId(marker->GetId());
-    m.SetMarkerSize(marker->GetMarkerEdgeLength(), marker->GetRes(), marker->GetMargin());
-    m.pose = marker->pose;
-    m.marker_corners_img = i->marker_corners_img;
-    new_measurements.push_back(m);
-    marker_detected[index] = true;
-  }
+void MultiMarkerInitializer::MeasurementsAdd(MarkerIterator& begin,
+                                             MarkerIterator& end)
+{
+  // copy markers into measurements.
+  vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> >
+      new_measurements;
+  for (MarkerIterator& i = begin.reset(); i != end; ++i)
+  {
+    const Marker* marker = *i;
+    int index = get_id_index(marker->GetId());
+    if (index == -1)
+      continue;
+    MarkerMeasurement m;
+    m.SetId(marker->GetId());
+    m.SetMarkerSize(marker->GetMarkerEdgeLength(), marker->GetRes(),
+                    marker->GetMargin());
+    m.pose = marker->pose;
+    m.marker_corners_img = i->marker_corners_img;
+    new_measurements.push_back(m);
+    marker_detected[index] = true;
+  }

-  // If we have seen the 0 marker the first time we push it into point could.
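+  // If we have seen marker 0 for the first time, push it into the point
+  // cloud; the first marker anchors the bundle's coordinate frame, so its
+  // corners are taken from the identity pose.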
+  for (MarkerIterator& i = begin.reset(); i != end; ++i)
+  {
+    const Marker* marker = *i;
+    int id = marker->GetId();
+    int index = get_id_index(id);
+    // Initialize marker pose
+    // if (id == 0 && marker_status[index] == 0)
+    if (index == 0 && marker_status[index] == 0)
+    {
+      Pose pose;
+      cv::Point3d corners[4];
+      PointCloudCorners3d(marker->GetMarkerEdgeLength(), pose, corners);
+      for (size_t j = 0; j < 4; ++j)
+      {
+        int p_index = pointcloud_index(id, j);
+        pointcloud[p_index] = corners[j];
+      }
+      marker_status[index] = 1;
+    }
+  }

-  measurements.push_back(new_measurements);
-}

-void MultiMarkerInitializer::MeasurementsReset() {
-  measurements.clear();
-  PointCloudReset();
-  fill(marker_status.begin(), marker_status.end(), 0);
-  fill(marker_detected.begin(), marker_detected.end(), false);
+  measurements.push_back(new_measurements);
+}

+void MultiMarkerInitializer::MeasurementsReset()
+{
+  measurements.clear();
+  PointCloudReset();
+  fill(marker_status.begin(), marker_status.end(), 0);
+  fill(marker_detected.begin(), marker_detected.end(), false);

-  for (size_t i=0; i<marker_indices.size()*4*3; i++) {
-    pointcloud_filtered[i].reset();
-  }
-}
+
+  for (size_t i = 0; i < marker_indices.size() * 4 * 3; i++)
+  {
+    pointcloud_filtered[i].reset();
+  }
+}

-int MultiMarkerInitializer::Initialize(Camera* cam) {
-  for (bool found_new = true; found_new; ) {
-    found_new = false;
-    // Iterate through all measurements, try to compute Pose for each.
-    for (MeasurementIterator mi = measurements.begin(); mi != measurements.end(); ++mi) {
-      vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> > &markers = *mi;
-      Pose pose;
-      MarkerIteratorImpl<MarkerMeasurement> m_begin(markers.begin());
-      MarkerIteratorImpl<MarkerMeasurement> m_end(markers.end());
-      double err = _GetPose(m_begin, m_end, cam, pose, NULL);
-      if (err >= 0) {
-        // If pose is found, estimate marker poses for those that are still unkown.
-        found_new = updateMarkerPoses(markers, pose);
-      }
-    }
-  }
+int MultiMarkerInitializer::Initialize(Camera* cam)
+{
+  for (bool found_new = true; found_new;)
+  {
+    found_new = false;
+    // Iterate through all measurements, try to compute Pose for each.
+    for (MeasurementIterator mi = measurements.begin();
+         mi != measurements.end(); ++mi)
+    {
+      vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> >&
+          markers = *mi;
+      Pose pose;
+      MarkerIteratorImpl<MarkerMeasurement> m_begin(markers.begin());
+      MarkerIteratorImpl<MarkerMeasurement> m_end(markers.end());
+
+      double err = _GetPose(m_begin, m_end, cam, pose);
+      if (err >= 0)
+      {
+        // If pose is found, estimate marker poses for those that are still
+        // unknown.
+        found_new = updateMarkerPoses(markers, pose);
+      }
+    }
+  }

-  // Check if every marker has been seen
-  int n_detected = 0;
-  for (unsigned int i = 0; i < marker_indices.size(); ++i) {
-    cout << i << " " << marker_detected[i] << " " << marker_status[i] << "\n";
-    if (marker_detected[i] && marker_status[i] != 0) ++n_detected;
-  }
-  return n_detected;
+  // Check if every marker has been seen
+  int n_detected = 0;
+  for (unsigned int i = 0; i < marker_indices.size(); ++i)
+  {
+    cout << i << " " << marker_detected[i] << " " << marker_status[i] << "\n";
+    if (marker_detected[i] && marker_status[i] != 0)
+      ++n_detected;
+  }
+  return n_detected;
}

-bool MultiMarkerInitializer::updateMarkerPoses(vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> > &markers, const Pose &pose) {
-  bool found_new = false;
-  for (vector<MarkerMeasurement, Eigen::aligned_allocator<MarkerMeasurement> >::iterator i = markers.begin(); i != markers.end(); ++i) {
-    MarkerMeasurement &marker = *i;
-    int id = marker.GetId();
-    int index = get_id_index(id);
-    if (index > 0 && !marker.globalPose) {
-      // Compute absolute marker pose.
- double cam_posed[16]; - double mar_posed[16]; - CvMat cam_mat = cvMat(4, 4, CV_64F, cam_posed); - CvMat mar_mat = cvMat(4, 4, CV_64F, mar_posed); - pose.GetMatrix(&cam_mat); - marker.pose.GetMatrix(&mar_mat); - cvInvert(&cam_mat, &cam_mat); - cvMatMul(&cam_mat, &mar_mat, &mar_mat); - marker.pose.SetMatrix(&mar_mat); - // Put marker into point cloud - CvPoint3D64f corners[4]; - PointCloudCorners3d(marker.GetMarkerEdgeLength(), marker.pose, corners); - for(size_t j = 0; j < 4; ++j) { - CvPoint3D64f p; - int p_index = pointcloud_index(id, j); - p.x = pointcloud_filtered[3*p_index+0].next(corners[j].x); - p.y = pointcloud_filtered[3*p_index+1].next(corners[j].y); - p.z = pointcloud_filtered[3*p_index+2].next(corners[j].z); - if (pointcloud_filtered[3*p_index+0].getCurrentSize() >= filter_buffer_min) { - pointcloud[p_index] = p; - marker_status[index] = 1; - } - } - marker.globalPose = 1; - found_new = true; - } - } - return found_new; +bool MultiMarkerInitializer::updateMarkerPoses( + vector >& + markers, + const Pose& pose) +{ + bool found_new = false; + for (vector >::iterator i = + markers.begin(); + i != markers.end(); ++i) + { + MarkerMeasurement& marker = *i; + int id = marker.GetId(); + int index = get_id_index(id); + if (index > 0 && !marker.globalPose) + { + // Compute absolute marker pose. + double cam_posed[16]; + double mar_posed[16]; + cv::Mat cam_mat = cv::Mat(4, 4, CV_64F, cam_posed); + cv::Mat mar_mat = cv::Mat(4, 4, CV_64F, mar_posed); + pose.GetMatrix(cam_mat); + marker.pose.GetMatrix(mar_mat); + cam_mat = cam_mat.inv(); + mar_mat = cam_mat * mar_mat; + marker.pose.SetMatrix(mar_mat); + // Put marker into point cloud + cv::Point3d corners[4]; + PointCloudCorners3d(marker.GetMarkerEdgeLength(), marker.pose, corners); + for (size_t j = 0; j < 4; ++j) + { + cv::Point3d p; + int p_index = pointcloud_index(id, j); + p.x = pointcloud_filtered[3 * p_index + 0].next(corners[j].x); + p.y = pointcloud_filtered[3 * p_index + 1].next(corners[j].y); + p.z = pointcloud_filtered[3 * p_index + 2].next(corners[j].z); + if (pointcloud_filtered[3 * p_index + 0].getCurrentSize() >= + filter_buffer_min) + { + pointcloud[p_index] = p; + marker_status[index] = 1; + } + } + marker.globalPose = 1; + found_new = true; + } + } + return found_new; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Mutex.cpp b/ar_track_alvar/src/Mutex.cpp index 26cd798..50547e7 100644 --- a/ar_track_alvar/src/Mutex.cpp +++ b/ar_track_alvar/src/Mutex.cpp @@ -25,26 +25,25 @@ #include "ar_track_alvar/Mutex_private.h" -namespace alvar { - -Mutex::Mutex() - : d(new MutexPrivate()) +namespace alvar +{ +Mutex::Mutex() : d(new MutexPrivate()) { } Mutex::~Mutex() { - delete d; + delete d; } void Mutex::lock() { - return d->lock(); + return d->lock(); } void Mutex::unlock() { - return d->unlock(); + return d->unlock(); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Mutex_unix.cpp b/ar_track_alvar/src/Mutex_unix.cpp index 33ce33e..9d92e39 100644 --- a/ar_track_alvar/src/Mutex_unix.cpp +++ b/ar_track_alvar/src/Mutex_unix.cpp @@ -25,39 +25,37 @@ #include -namespace alvar { - +namespace alvar +{ class MutexPrivateData { public: - MutexPrivateData() - : mMutex() - { - } + MutexPrivateData() : mMutex() + { + } - pthread_mutex_t mMutex; + pthread_mutex_t mMutex; }; -MutexPrivate::MutexPrivate() - : d(new MutexPrivateData()) +MutexPrivate::MutexPrivate() : d(new MutexPrivateData()) { - pthread_mutex_init(&d->mMutex, NULL); + pthread_mutex_init(&d->mMutex, NULL); } 
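Since `alvar::Mutex` only exposes bare `lock()`/`unlock()`, every caller has to pair the two calls manually on each early-return path. A scope guard keeps that pairing automatic. The sketch below is a hypothetical helper built only on the `lock()`/`unlock()` calls shown in this file; it is not an API that ships with ALVAR:

```
#include "ar_track_alvar/Mutex.h"

namespace alvar
{
// Hypothetical RAII guard: locks on construction, unlocks on destruction,
// so early returns and exceptions cannot leave the mutex held.
class ScopedMutex
{
public:
  explicit ScopedMutex(Mutex& mutex) : mMutex(mutex)
  {
    mMutex.lock();
  }
  ~ScopedMutex()
  {
    mMutex.unlock();
  }

private:
  ScopedMutex(const ScopedMutex&);             // non-copyable
  ScopedMutex& operator=(const ScopedMutex&);  // non-assignable
  Mutex& mMutex;
};
}  // namespace alvar

// Usage: a critical section is just a scope.
// void update(alvar::Mutex& m)
// {
//   alvar::ScopedMutex guard(m);
//   // ...shared state is protected until guard leaves scope...
// }
```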
MutexPrivate::~MutexPrivate() { - pthread_mutex_destroy(&d->mMutex); - delete d; + pthread_mutex_destroy(&d->mMutex); + delete d; } void MutexPrivate::lock() { - pthread_mutex_lock(&d->mMutex); + pthread_mutex_lock(&d->mMutex); } void MutexPrivate::unlock() { - pthread_mutex_unlock(&d->mMutex); + pthread_mutex_unlock(&d->mMutex); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Mutex_win.cpp b/ar_track_alvar/src/Mutex_win.cpp index b69da4a..028d741 100644 --- a/ar_track_alvar/src/Mutex_win.cpp +++ b/ar_track_alvar/src/Mutex_win.cpp @@ -25,39 +25,37 @@ #include -namespace alvar { - +namespace alvar +{ class MutexPrivateData { public: - MutexPrivateData() - : mCriticalSection() - { - } + MutexPrivateData() : mCriticalSection() + { + } - CRITICAL_SECTION mCriticalSection; + CRITICAL_SECTION mCriticalSection; }; -MutexPrivate::MutexPrivate() - : d(new MutexPrivateData()) +MutexPrivate::MutexPrivate() : d(new MutexPrivateData()) { - InitializeCriticalSection(&d->mCriticalSection); + InitializeCriticalSection(&d->mCriticalSection); } MutexPrivate::~MutexPrivate() { - DeleteCriticalSection(&d->mCriticalSection); - delete d; + DeleteCriticalSection(&d->mCriticalSection); + delete d; } void MutexPrivate::lock() { - EnterCriticalSection(&d->mCriticalSection); + EnterCriticalSection(&d->mCriticalSection); } void MutexPrivate::unlock() { - LeaveCriticalSection(&d->mCriticalSection); + LeaveCriticalSection(&d->mCriticalSection); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Optimization.cpp b/ar_track_alvar/src/Optimization.cpp index ea60c2a..fbbe833 100644 --- a/ar_track_alvar/src/Optimization.cpp +++ b/ar_track_alvar/src/Optimization.cpp @@ -24,284 +24,287 @@ #include "ar_track_alvar/Alvar.h" #include "ar_track_alvar/Optimization.h" #include "time.h" -#include "highgui.h" +#include #include using namespace std; -namespace alvar { - +namespace alvar +{ Optimization::Optimization(int n_params, int n_meas) { - estimate_param = 0; - J = cvCreateMat(n_meas, n_params, CV_64F); cvZero(J); - JtJ = cvCreateMat(n_params, n_params, CV_64F); cvZero(JtJ); - tmp = cvCreateMat(n_params, n_meas, CV_64F); cvZero(tmp); - W = cvCreateMat(n_meas, n_meas, CV_64F); cvZero(W); - diag = cvCreateMat(n_params, n_params, CV_64F); cvZero(diag); - err = cvCreateMat(n_meas, 1, CV_64F); cvZero(err); - delta = cvCreateMat(n_params, 1, CV_64F); cvZero(delta); - x_minus = cvCreateMat(n_params, 1, CV_64F); cvZero(x_minus); - x_plus = cvCreateMat(n_params, 1, CV_64F); cvZero(x_plus); - x_tmp1 = cvCreateMat(n_meas, 1, CV_64F); cvZero(x_tmp1); - x_tmp2 = cvCreateMat(n_meas, 1, CV_64F); cvZero(x_tmp2); - tmp_par = cvCreateMat(n_params, 1, CV_64F); cvZero(tmp_par); + estimate_param = 0; + J = cv::Mat::zeros(n_meas, n_params, CV_64F); + JtJ = cv::Mat::zeros(n_params, n_params, CV_64F); + tmp = cv::Mat::zeros(n_params, n_meas, CV_64F); + W = cv::Mat::zeros(n_meas, n_meas, CV_64F); + diag = cv::Mat::zeros(n_params, n_params, CV_64F); + err = cv::Mat::zeros(n_meas, 1, CV_64F); + delta = cv::Mat::zeros(n_params, 1, CV_64F); + x_minus = cv::Mat::zeros(n_params, 1, CV_64F); + x_plus = cv::Mat::zeros(n_params, 1, CV_64F); + x_tmp1 = cv::Mat::zeros(n_meas, 1, CV_64F); + x_tmp2 = cv::Mat::zeros(n_meas, 1, CV_64F); + tmp_par = cv::Mat::zeros(n_params, 1, CV_64F); } Optimization::~Optimization() { - cvReleaseMat(&J); - cvReleaseMat(&JtJ); - cvReleaseMat(&diag); - cvReleaseMat(&tmp); - cvReleaseMat(&W); - cvReleaseMat(&err); - cvReleaseMat(&delta); - cvReleaseMat(&x_plus); - 
cvReleaseMat(&x_minus); - cvReleaseMat(&x_tmp1); - cvReleaseMat(&x_tmp2); - cvReleaseMat(&tmp_par); - estimate_param = 0; + J.release(); + JtJ.release(); + diag.release(); + tmp.release(); + W.release(); + err.release(); + delta.release(); + x_plus.release(); + x_minus.release(); + x_tmp1.release(); + x_tmp2.release(); + tmp_par.release(); + estimate_param = 0; } double Optimization::CalcTukeyWeight(double residual, double c) { - //const double c = 3; // squared distance in the model tracker - double ret=0; - - if(fabs(residual) <= c) - { - double tmp = 1.0-((residual/c)*(residual/c)); - ret = ((c*c)/6.0)*(1.0-tmp*tmp*tmp); - } - else - ret = (c*c)/6.0; - - if(residual) - ret = fabs(sqrt(ret)/residual); - else - ret = 1.0; // ??? - - return ret; + // const double c = 3; // squared distance in the model tracker + double ret = 0; + + if (fabs(residual) <= c) + { + double tmp = 1.0 - ((residual / c) * (residual / c)); + ret = ((c * c) / 6.0) * (1.0 - tmp * tmp * tmp); + } + else + ret = (c * c) / 6.0; + + if (residual) + ret = fabs(sqrt(ret) / residual); + else + ret = 1.0; // ??? + + return ret; } double Optimization::CalcTukeyWeightSimple(double residual, double c) { - //const double c = 3; - double ret=0; - - double x2 = residual*residual; - if(x2cols; i++) - { - CvMat J_column; - cvGetCol(J, &J_column, i); - - cvZero(delta); - cvmSet(delta, i, 0, step); - cvAdd(x, delta, x_plus); - cvmSet(delta, i, 0, -step); - cvAdd(x, delta, x_minus); - - Estimate(x_plus, x_tmp1, estimate_param); - Estimate(x_minus, x_tmp2, estimate_param); - cvSub(x_tmp1, x_tmp2, &J_column); - cvScale(&J_column, &J_column, 1.0/(2*step)); - } + const double step = 0.001; + + J.setTo(cv::Scalar::all(0)); + for (int i = 0; i < J.cols; i++) + { + cv::Mat J_column = J.col(i); + + delta.setTo(cv::Scalar::all(0)); + delta.at(i, 0) = step; + x_plus = x + delta; + delta.at(i, 0) = -step; + x_minus = x + delta; + + Estimate(x_plus, x_tmp1, estimate_param); + Estimate(x_minus, x_tmp2, estimate_param); + J_column = x_tmp1 - x_tmp2; + J_column = J_column * 1.0 / (2 * step); + } } - -double Optimization::Optimize(CvMat* parameters, // Initial values are set - CvMat* measurements, // Some observations - double stop, - int max_iter, - EstimateCallback Estimate, - void *param, - OptimizeMethod method, - CvMat* parameters_mask, // Mask indicating non-constant parameters) - CvMat* J_mat, - CvMat* weights) +double Optimization::Optimize( + cv::Mat& parameters, // Initial values are set + cv::Mat& measurements, // Some observations + double stop, int max_iter, EstimateCallback Estimate, void* param, + OptimizeMethod method, + const cv::Mat& parameters_mask, // Mask indicating non-constant parameters) + const cv::Mat& J_mat, const cv::Mat& weights) { - - int n_params = parameters->rows; - int n_meas = measurements->rows; - double error_new = 0; - double error_old = 0; - double n1, n2; - int cntr = 0; - estimate_param = param; - lambda = 0.001; - - while(true) - { - if(!J_mat) - CalcJacobian(parameters, J, Estimate); - else - J = J_mat; - - // Zero the columns for constant parameters - // TODO: Make this into a J-sized mask matrix before the iteration loop - if(parameters_mask) - for (int i=0; irows; i++) { - if (cvGet2D(parameters_mask, i, 0).val[0] == 0) { - CvRect rect; - rect.height = J->rows; rect.width = 1; - rect.y = 0; rect.x = i; - CvMat foo; - cvGetSubRect(J, &foo, rect); - cvZero(&foo); - } - } - - Estimate(parameters, x_tmp1, estimate_param); - cvSub(measurements, x_tmp1, err); // err = residual - error_old = cvNorm(err, 0, 
-
-double Optimization::Optimize(CvMat* parameters, // Initial values are set
-                              CvMat* measurements, // Some observations
-                              double stop,
-                              int max_iter,
-                              EstimateCallback Estimate,
-                              void *param,
-                              OptimizeMethod method,
-                              CvMat* parameters_mask, // Mask indicating non-constant parameters)
-                              CvMat* J_mat,
-                              CvMat* weights)
+double Optimization::Optimize(
+    cv::Mat& parameters,    // Initial values are set
+    cv::Mat& measurements,  // Some observations
+    double stop, int max_iter, EstimateCallback Estimate, void* param,
+    OptimizeMethod method,
+    const cv::Mat& parameters_mask,  // Mask indicating non-constant parameters
+    const cv::Mat& J_mat, const cv::Mat& weights)
 {
-
-  int n_params = parameters->rows;
-  int n_meas = measurements->rows;
-  double error_new = 0;
-  double error_old = 0;
-  double n1, n2;
-  int cntr = 0;
-  estimate_param = param;
-  lambda = 0.001;
-
-  while(true)
-  {
-    if(!J_mat)
-      CalcJacobian(parameters, J, Estimate);
-    else
-      J = J_mat;
-
-    // Zero the columns for constant parameters
-    // TODO: Make this into a J-sized mask matrix before the iteration loop
-    if(parameters_mask)
-      for (int i=0; i<parameters_mask->rows; i++) {
-        if (cvGet2D(parameters_mask, i, 0).val[0] == 0) {
-          CvRect rect;
-          rect.height = J->rows; rect.width = 1;
-          rect.y = 0; rect.x = i;
-          CvMat foo;
-          cvGetSubRect(J, &foo, rect);
-          cvZero(&foo);
-        }
-      }
-
-    Estimate(parameters, x_tmp1, estimate_param);
-    cvSub(measurements, x_tmp1, err); // err = residual
-    error_old = cvNorm(err, 0, CV_L2);
-
-    switch(method)
-    {
-      case (GAUSSNEWTON) :
-
-        cvMulTransposed(J, JtJ, 1);
-        cvInv(JtJ, JtJ, CV_SVD);
-        cvGEMM(JtJ, J, 1.0, 0, 0, tmp, CV_GEMM_B_T); // inv(JtJ)Jt
-        cvMatMul(tmp, err, delta);
-        cvAdd(delta, parameters, parameters);
-
-        // Lopetusehto
-        n1 = cvNorm(delta);
-        n2 = cvNorm(parameters);
-
-        if( ((n1/n2) < stop) ||
-            (cntr >= max_iter) )
-          goto end;
-
-        break;
-
-      case (LEVENBERGMARQUARDT) :
-
-        cvSetIdentity(diag, cvRealScalar(lambda));
-
-        if(weights)
-          for(int k = 0; k < W->rows; ++k)
-            cvmSet(W, k, k, weights->data.db[k]);
-
-        // JtWJ
-        if(weights)
-        {
-          cvGEMM(J, W, 1, 0, 0, tmp, CV_GEMM_A_T);
-          cvGEMM(tmp, J, 1, 0, 0, JtJ, 0);
-        }
-        else
-          cvMulTransposed(J, JtJ, 1);
-
-        // JtJ + lambda*I
-        // or JtWJ + lambda*I if weights are used...
-        cvAdd(JtJ, diag, JtJ);
-        cvInv(JtJ, JtJ, CV_SVD);
-        cvGEMM(JtJ, J, 1.0, 0, 0, tmp, CV_GEMM_B_T);
-
-        if(weights)
-          cvGEMM(tmp, W, 1, 0, 0, tmp, 0);
-
-        cvMatMul(tmp, err, delta);
-        cvAdd(delta, parameters, tmp_par);
-
-        Estimate(tmp_par, x_tmp1, estimate_param);
-        cvSub(measurements, x_tmp1, err);
-
-        error_new = cvNorm(err, 0, CV_L2);
-
-        if(error_new < error_old)
-        {
-          cvCopy(tmp_par, parameters);
-          lambda = lambda/10.0;
-        }
-        else
-        {
-          lambda = lambda*10.0;
-        }
-        if(lambda>10) lambda = 10;
-        if(lambda<0.00001) lambda = 0.00001;
-
-        n1 = cvNorm(delta);
-        n2 = cvNorm(parameters);
-
-        if( (n1/n2) < stop ||
-            (cntr >= max_iter) )
-        {
-          goto end;
-        }
-
-        break;
-
-      case (TUKEY_LM) :
-
-        cvSetIdentity(diag, cvRealScalar(lambda));
-
-        // Tukey weights
-        for(int k = 0; k < W->rows; ++k)
-        {
-          if(weights)                                              // If using weight vector
-            if(weights->data.db[k] != -1.0)                        // If true weight given
-              cvmSet(W, k, k, weights->data.db[k]);                // Use given weight
-            else
-              cvmSet(W, k, k, CalcTukeyWeight(err->data.db[k], 3)); // otherwise use Tukey weight
-          else
-            cvmSet(W, k, k, CalcTukeyWeight(err->data.db[k], 3));   // Otherwise use Tukey weight
-        }
-
-        cvGEMM(J, W, 1, 0, 0, tmp, CV_GEMM_A_T);
-        cvGEMM(tmp, J, 1, 0, 0, JtJ, 0);
-        cvAdd(JtJ, diag, JtJ);
-        cvInv(JtJ, JtJ, CV_SVD);
-        cvGEMM(JtJ, J, 1.0, 0, 0, tmp, CV_GEMM_B_T);
-        cvGEMM(tmp, W, 1, 0, 0, tmp, 0);
-        cvMatMul(tmp, err, delta);
-        cvAdd(delta, parameters, tmp_par);
-
-        Estimate(tmp_par, x_tmp1, estimate_param);
-        cvSub(measurements, x_tmp1, err);
-
-        error_new = cvNorm(err, 0, CV_L2);
-
-        if(error_new < error_old)
-        {
-          cvCopy(tmp_par, parameters);
-          lambda = lambda/10.0;
-        }
-        else
-        {
-          lambda = lambda*10.0;
-        }
-        if(lambda>10) lambda = 10;
-        if(lambda<0.00001) lambda = 0.00001;
-
-        n1 = cvNorm(delta);
-        n2 = cvNorm(parameters);
-
-        if( ((n1/n2) < stop) ||
-            (cntr >= max_iter) )
-        {
-          goto end;
-        }
-
-        break;
-    }
-    ++cntr;
-  }
-
-end :
-
-  return error_old;
+  int n_params = parameters.rows;
+  int n_meas = measurements.rows;
+  double error_new = 0;
+  double error_old = 0;
+  double n1, n2;
+  int cntr = 0;
+  estimate_param = param;
+  lambda = 0.001;
+
+  while (true)
+  {
+    if (J_mat.empty())
+      CalcJacobian(parameters, J, Estimate);
+    else
+      J = J_mat;
+
+    // Zero the columns for constant parameters
+    // TODO: Make this into a J-sized mask matrix before the iteration loop
+    if (!parameters_mask.empty())
+      for (int i = 0; i < parameters_mask.rows; i++)
+      {
+        if (parameters_mask.at<double>(i, 0) == 0)
+        {
+          cv::Rect rect;
+          rect.height = J.rows;
+          rect.width = 1;
+          rect.y = 0;
+          rect.x = i;
+          cv::Mat foo = J(rect);
+          foo.setTo(cv::Scalar::all(0));
+        }
+      }
+
+    Estimate(parameters, x_tmp1, estimate_param);
+    err = measurements - x_tmp1;  // err = residual
+    error_old = cv::norm(err, cv::NORM_L2);
+
+    switch (method)
+    {
+      case (GAUSSNEWTON):
+
+        cv::mulTransposed(J, JtJ, true);
+        cv::invert(JtJ, JtJ, cv::DECOMP_SVD);
+        cv::gemm(JtJ, J, 1.0, 0, 0, tmp, cv::GEMM_2_T);  // inv(JtJ)Jt
+
+        delta = tmp * err;
+        parameters = delta + parameters;
+
+        // Lopetusehto
+        n1 = cv::norm(delta);
+        n2 = cv::norm(parameters);
+
+        if (((n1 / n2) < stop) || (cntr >= max_iter))
+          goto end;
+
+        break;
+
+      case (LEVENBERGMARQUARDT):
+        cv::setIdentity(diag, cv::Scalar(lambda));
+
+        if (!weights.empty())
+          for (int k = 0; k < W.rows; ++k)
+            W.at<double>(k, k) = weights.at<double>(k);
+
+        // JtWJ
+        if (!weights.empty())
+        {
+          cv::gemm(J, W, 1, 0, 0, tmp, cv::GEMM_1_T);
+          cv::gemm(tmp, J, 1, 0, 0, JtJ, 0);
+        }
+        else
+          cv::mulTransposed(J, JtJ, true);
+
+        // JtJ + lambda*I
+        // or JtWJ + lambda*I if weights are used...
+        JtJ = JtJ + diag;
+        cv::invert(JtJ, JtJ, cv::DECOMP_SVD);
+        cv::gemm(JtJ, J, 1.0, 0, 0, tmp, cv::GEMM_2_T);
+
+        if (!weights.empty())
+          cv::gemm(tmp, W, 1, 0, 0, tmp, 0);
+
+        delta = tmp * err;
+        tmp_par = delta + parameters;
+
+        Estimate(tmp_par, x_tmp1, estimate_param);
+        err = measurements - x_tmp1;
+
+        error_new = cv::norm(err, cv::NORM_L2);
+
+        if (error_new < error_old)
+        {
+          tmp_par.copyTo(parameters);
+          lambda = lambda / 10.0;
+        }
+        else
+        {
+          lambda = lambda * 10.0;
+        }
+        if (lambda > 10)
+          lambda = 10;
+        if (lambda < 0.00001)
+          lambda = 0.00001;
+
+        n1 = cv::norm(delta);
+        n2 = cv::norm(parameters);
+
+        if ((n1 / n2) < stop || (cntr >= max_iter))
+        {
+          goto end;
+        }
+
+        break;
+
+      case (TUKEY_LM):
+
+        cv::setIdentity(diag, cv::Scalar(lambda));
+
+        // Tukey weights
+        for (int k = 0; k < W.rows; ++k)
+        {
+          if (!weights.empty())                     // If using weight vector
+            if (weights.at<double>(k) != -1.0)      // If true weight given
+              W.at<double>(k, k) = weights.at<double>(k);  // Use given weight
+            else
+              W.at<double>(k, k) = CalcTukeyWeight(
+                  err.at<double>(k), 3);  // otherwise use Tukey weight
+          else
+            W.at<double>(k, k) = CalcTukeyWeight(
+                err.at<double>(k), 3);  // Otherwise use Tukey weight
+        }
+
+        cv::gemm(J, W, 1, 0, 0, tmp, cv::GEMM_1_T);
+        cv::gemm(tmp, J, 1, 0, 0, JtJ, 0);
+        JtJ = JtJ + diag;
+        cv::invert(JtJ, JtJ, cv::DECOMP_SVD);
+        cv::gemm(JtJ, J, 1.0, 0, 0, tmp, cv::GEMM_2_T);
+        cv::gemm(tmp, W, 1, 0, 0, tmp, 0);
+        delta = tmp * err;
+        tmp_par = delta + parameters;
+
+        Estimate(tmp_par, x_tmp1, estimate_param);
+        err = measurements - x_tmp1;
+
+        error_new = cv::norm(err, cv::NORM_L2);
+
+        if (error_new < error_old)
+        {
+          tmp_par.copyTo(parameters);
+          lambda = lambda / 10.0;
+        }
+        else
+        {
+          lambda = lambda * 10.0;
+        }
+        if (lambda > 10)
+          lambda = 10;
+        if (lambda < 0.00001)
+          lambda = 0.00001;
+
+        n1 = cv::norm(delta);
+        n2 = cv::norm(parameters);
+
+        if (((n1 / n2) < stop) || (cntr >= max_iter))
+        {
+          goto end;
+        }
+
+        break;
+    }
+    ++cntr;
+  }
+
+end:
+
+  return error_old;
 }
-} // namespace alvar
+}  // namespace alvar
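Both damped branches above reduce to solving (JᵀWJ + λI)·delta = JᵀW·err and then adapting λ with the accept/reject rule at the bottom of each case. A standalone sketch of that single step under the same conventions; the `lmStep` helper is illustrative, not part of the library:

```
#include <opencv2/core.hpp>

// One damped Gauss-Newton step: delta = (J^T J + lambda*I)^-1 * J^T * err.
static cv::Mat lmStep(const cv::Mat& J, const cv::Mat& err, double lambda)
{
  cv::Mat JtJ;
  cv::mulTransposed(J, JtJ, true);  // JtJ = J^T * J
  JtJ += lambda * cv::Mat::eye(JtJ.rows, JtJ.cols, CV_64F);
  cv::Mat JtJ_inv;
  cv::invert(JtJ, JtJ_inv, cv::DECOMP_SVD);
  return JtJ_inv * (J.t() * err);
}
```

The caller keeps the schedule shown in the diff: accept the step and divide λ by 10 when the residual norm drops, otherwise reject it and multiply λ by 10, clamping λ to [0.00001, 10].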
{
-  fflush(stdout);
-  if (filename) {
-    fprintf(stderr, "%s:%d: ", filename, line);
-  }
-  if (format) {
-    va_list args;
-    va_start(args, format);
-    vfprintf(stderr, format, args);
-    va_end(args);
-  }
-  if (error) {
-    fprintf(stderr, ": %s", strerror(error));
-  }
-  fprintf(stderr, "\n");
-  fflush(stderr);
-  if (status) {
-    exit(status);
-  }
+  fflush(stdout);
+  if (filename)
+  {
+    fprintf(stderr, "%s:%d: ", filename, line);
+  }
+  if (format)
+  {
+    va_list args;
+    va_start(args, format);
+    vfprintf(stderr, format, args);
+    va_end(args);
+  }
+  if (error)
+  {
+    fprintf(stderr, ": %s", strerror(error));
+  }
+  fprintf(stderr, "\n");
+  fflush(stderr);
+  if (status)
+  {
+    exit(status);
+  }
 }
 
 void sleep(unsigned long milliseconds)
 {
-  #ifdef WIN32
-    Sleep(milliseconds);
-  #else
-    struct timespec t;
-    t.tv_sec = 0;
-    t.tv_nsec = 1000 * 1000 * milliseconds;
-    nanosleep(&t, NULL);
-  #endif
+#ifdef WIN32
+  Sleep(milliseconds);
+#else
+  struct timespec t;
+  t.tv_sec = 0;
+  t.tv_nsec = 1000 * 1000 * milliseconds;
+  nanosleep(&t, NULL);
+#endif
 }
 
-} // namespace alvar
+}  // namespace alvar
diff --git a/ar_track_alvar/src/Plugin.cpp b/ar_track_alvar/src/Plugin.cpp
index 0758b41..93a36c2 100644
--- a/ar_track_alvar/src/Plugin.cpp
+++ b/ar_track_alvar/src/Plugin.cpp
@@ -25,42 +25,41 @@
 #include "ar_track_alvar/Plugin_private.h"
 
-namespace alvar {
-
+namespace alvar
+{
 Plugin::Plugin(const std::string filename)
-  : d(new PluginPrivate())
-  , mReferenceCount(new int(1))
+  : d(new PluginPrivate()), mReferenceCount(new int(1))
 {
-  d->load(filename);
+  d->load(filename);
 }
 
-Plugin::Plugin(const Plugin &plugin)
-  : d(plugin.d)
-  , mReferenceCount(plugin.mReferenceCount)
+Plugin::Plugin(const Plugin& plugin)
+  : d(plugin.d), mReferenceCount(plugin.mReferenceCount)
 {
-  ++*mReferenceCount;
+  ++*mReferenceCount;
 }
 
-Plugin &Plugin::operator=(const Plugin &plugin)
+Plugin& Plugin::operator=(const Plugin& plugin)
 {
-  d = plugin.d;
-  mReferenceCount = plugin.mReferenceCount;
-  ++*mReferenceCount;
-  return *this;
+  d = plugin.d;
+  mReferenceCount = plugin.mReferenceCount;
+  ++*mReferenceCount;
+  return *this;
 }
 
 Plugin::~Plugin()
 {
-  if (!--*mReferenceCount) {
-    d->unload();
-    delete d;
-    delete mReferenceCount;
-  }
+  if (!--*mReferenceCount)
+  {
+    d->unload();
+    delete d;
+    delete mReferenceCount;
+  }
 }
 
-void *Plugin::resolve(const char *symbol)
+void* Plugin::resolve(const char* symbol)
 {
-  return d->resolve(symbol);
+  return d->resolve(symbol);
 }
 
-} // namespace alvar
+}  // namespace alvar
diff --git a/ar_track_alvar/src/Plugin_unix.cpp b/ar_track_alvar/src/Plugin_unix.cpp
index 931e21e..3a383a4 100644
--- a/ar_track_alvar/src/Plugin_unix.cpp
+++ b/ar_track_alvar/src/Plugin_unix.cpp
@@ -29,54 +29,53 @@
 #include
 #include
 
-namespace alvar {
-
+namespace alvar
+{
 class PluginPrivateData
 {
 public:
-  PluginPrivateData()
-    : mHandle(NULL)
-  {
-  }
-
-  void *mHandle;
+  PluginPrivateData() : mHandle(NULL)
+  {
+  }
+
+  void* mHandle;
 };
 
-PluginPrivate::PluginPrivate()
-  : d(new PluginPrivateData())
+PluginPrivate::PluginPrivate() : d(new PluginPrivateData())
 {
 }
 
 PluginPrivate::~PluginPrivate()
 {
-  delete d;
+  delete d;
 }
 
 void PluginPrivate::load(const std::string filename)
 {
-  d->mHandle = dlopen(filename.data(), RTLD_LAZY);
-  if (!d->mHandle) {
-    std::stringstream message;
-    message << "could not load " << filename
-            << ", error code " << errno;
-    throw AlvarException(message.str().data());
-  }
+  d->mHandle = dlopen(filename.data(), RTLD_LAZY);
+  if (!d->mHandle)
+  {
+    std::stringstream message;
+    message << "could not load " << filename << ", error code " << errno;
+    throw AlvarException(message.str().data());
+  }
 }
 
 void PluginPrivate::unload()
 {
-  dlclose(d->mHandle);
+  dlclose(d->mHandle);
 }
 
-void *PluginPrivate::resolve(const char *symbol)
+void* PluginPrivate::resolve(const char* symbol)
 {
-  void *address = (void *)dlsym(d->mHandle, symbol);
-  if (!address) {
-    std::stringstream message;
-    message << "could not resolve " << symbol;
-    throw AlvarException(message.str().data());
-  }
-  return address;
+  void* address = (void*)dlsym(d->mHandle, symbol);
+  if (!address)
+  {
+    std::stringstream message;
+    message << "could not resolve " << symbol;
+    throw AlvarException(message.str().data());
+  }
+  return address;
 }
 
-} // namespace alvar
+}  // namespace alvar
diff --git a/ar_track_alvar/src/Plugin_win.cpp b/ar_track_alvar/src/Plugin_win.cpp
index d82cfd0..85083b5 100644
--- a/ar_track_alvar/src/Plugin_win.cpp
+++ b/ar_track_alvar/src/Plugin_win.cpp
@@ -28,54 +28,54 @@
 #include
 #include
 
-namespace alvar {
-
+namespace alvar
+{
 class PluginPrivateData
 {
 public:
-  PluginPrivateData()
-    : mHandle(NULL)
-  {
-  }
-
-  HINSTANCE mHandle;
+  PluginPrivateData() : mHandle(NULL)
+  {
+  }
+
+  HINSTANCE mHandle;
 };
 
-PluginPrivate::PluginPrivate()
-  : d(new PluginPrivateData())
+PluginPrivate::PluginPrivate() : d(new PluginPrivateData())
 {
 }
 
 PluginPrivate::~PluginPrivate()
 {
-  delete d;
+  delete d;
 }
 
 void PluginPrivate::load(const std::string filename)
 {
-  d->mHandle = LoadLibrary(filename.data());
-  if (!d->mHandle) {
-    std::stringstream message;
-    message << "could not load " << filename
-            << ", error code " << GetLastError();
-    throw AlvarException(message.str().data());
-  }
+  d->mHandle = LoadLibrary(filename.data());
+  if (!d->mHandle)
+  {
+    std::stringstream message;
+    message << "could not load " << filename << ", error code "
+            << GetLastError();
+    throw AlvarException(message.str().data());
+  }
 }
 
 void PluginPrivate::unload()
 {
-  FreeLibrary(d->mHandle);
+  FreeLibrary(d->mHandle);
 }
 
-void *PluginPrivate::resolve(const char *symbol)
+void* PluginPrivate::resolve(const char* symbol)
 {
-  void *address = (void *)GetProcAddress(d->mHandle, symbol);
-  if (!address) {
-    std::stringstream message;
-    message << "could not resolve " << symbol;
-    throw AlvarException(message.str().data());
-  }
-  return address;
+  void* address = (void*)GetProcAddress(d->mHandle, symbol);
+  if (!address)
+  {
+    std::stringstream message;
+    message << "could not resolve " << symbol;
+    throw AlvarException(message.str().data());
+  }
+  return address;
 }
 
-} // namespace alvar
+}  // namespace alvar
diff --git a/ar_track_alvar/src/Pose.cpp b/ar_track_alvar/src/Pose.cpp
index 6809e57..2084acc 100644
--- a/ar_track_alvar/src/Pose.cpp
+++ b/ar_track_alvar/src/Pose.cpp
@@ -25,158 +25,181 @@
 using namespace std;
 
-namespace alvar {
+namespace alvar
+{
 using namespace std;
 
-void Pose::Output() const {
-  cout<<quaternion[0]<<","<<quaternion[1]<<","<<quaternion[2]<<","<<quaternion[3]<<"|";
-  cout<<translation[0]<<","<<translation[1]<<","<<translation[2]<<endl;
+void Pose::Output() const
+{
+  cout << quaternion[0] << "," << quaternion[1] << "," << quaternion[2] << ","
+       << quaternion[3] << "|";
+  cout << translation[0] << "," << translation[1] << "," << translation[2]
+       << endl;
 }
 
-Pose::Pose() : Rotation() {
-  cvInitMatHeader(&translation_mat, 4, 1, CV_64F, translation);
-  cvZero(&translation_mat);
-  cvmSet(&translation_mat, 3, 0, 1);
+Pose::Pose() : Rotation()
+{
+  translation_mat = cv::Mat(4, 1, CV_64F, translation);
+  translation_mat.setTo(cv::Scalar::all(0));
+  translation_mat.at<double>(3, 0) = 1;
 }
 
-Pose::Pose(CvMat *tra, CvMat *rot, RotationType t) : Rotation(rot, t) {
-  cvInitMatHeader(&translation_mat, 4, 1, CV_64F, translation);
-  cvZero(&translation_mat);
-  cvmSet(&translation_mat, 3, 0, 1);
-  // Fill in translation part
-  cvmSet(&translation_mat, 0, 0, cvmGet(tra, 0, 0));
-  cvmSet(&translation_mat, 1, 0, cvmGet(tra, 1, 0));
-  cvmSet(&translation_mat, 2, 0, cvmGet(tra, 2, 0));
-}
-Pose::Pose(CvMat *mat) : Rotation(mat, MAT) {
-  cvInitMatHeader(&translation_mat, 4, 1, CV_64F, translation);
-  cvZero(&translation_mat);
-  cvmSet(&translation_mat, 3, 0, 1);
-  // Fill in translation part
-  if (mat->cols == 4) {
-    cvmSet(&translation_mat, 0, 0, cvmGet(mat, 0, 3));
-    cvmSet(&translation_mat, 1, 0, cvmGet(mat, 1, 3));
-    cvmSet(&translation_mat, 2, 0, cvmGet(mat, 2, 3));
-  }
+Pose::Pose(const cv::Mat& tra, const cv::Mat& rot, RotationType t)
+  : Rotation(rot, t)
+{
+  translation_mat = cv::Mat(4, 1, CV_64F, translation);
+  translation_mat.setTo(cv::Scalar::all(0));
+  translation_mat.at<double>(3, 0) = 1;
+  // Fill in translation part
+  translation_mat.at<double>(0, 0) = tra.at<double>(0, 0);
+  translation_mat.at<double>(1, 0) = tra.at<double>(1, 0);
+  translation_mat.at<double>(2, 0) = tra.at<double>(2, 0);
+}
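A short usage sketch of the constructor ported above, assuming the cv::Mat signatures shown in this diff and ALVAR's `RotationType` enum scoped as `alvar::Rotation::ROD`; the rotation and translation values are arbitrary:

```
#include <opencv2/core.hpp>
#include "ar_track_alvar/Pose.h"

void example()
{
  double rod[3] = { 0.0, 0.0, 0.5 };    // Rodrigues vector: 0.5 rad about z
  double tra[3] = { 10.0, 20.0, 30.0 };
  cv::Mat rot(3, 1, CV_64F, rod);
  cv::Mat t(3, 1, CV_64F, tra);
  alvar::Pose pose(t, rot, alvar::Rotation::ROD);

  double m[16];
  cv::Mat m_mat(4, 4, CV_64F, m);
  pose.GetMatrix(m_mat);  // top-left 3x3 rotation, translation in column 3
}
```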
+Pose::Pose(const cv::Mat& mat) : Rotation(mat, MAT)
+{
+  translation_mat = cv::Mat(4, 1, CV_64F, translation);
+  translation_mat.setTo(cv::Scalar::all(0));
+  translation_mat.at<double>(3, 0) = 1;
+  // Fill in translation part
+  if (mat.cols == 4)
+  {
+    translation_mat.at<double>(0, 0) = mat.at<double>(0, 3);
+    translation_mat.at<double>(1, 0) = mat.at<double>(1, 3);
+    translation_mat.at<double>(2, 0) = mat.at<double>(2, 3);
+  }
+}
 
-Pose::Pose(const Pose& p) :Rotation(p) {
-  cvInitMatHeader(&translation_mat, 4, 1, CV_64F, translation);
-  cvCopy(&p.translation_mat, &translation_mat);
+Pose::Pose(const Pose& p) : Rotation(p)
+{
+  translation_mat = cv::Mat(4, 1, CV_64F, translation);
+  p.translation_mat.copyTo(translation_mat);
 }
 
 void Pose::Reset()
 {
-  cvZero(&quaternion_mat); cvmSet(&quaternion_mat, 0, 0, 1);
-  cvZero(&translation_mat);
+  quaternion_mat.setTo(cv::Scalar::all(0));
+  quaternion_mat.at<double>(0, 0) = 1;
+  translation_mat.setTo(cv::Scalar::all(0));
 }
 
-void Pose::SetMatrix(const CvMat *mat)
+void Pose::SetMatrix(const cv::Mat& mat)
 {
-  double tmp[9];
-  for(int i = 0; i < 3; ++i)
-    for(int j = 0; j < 3; ++j)
-      tmp[i*3+j] = cvmGet(mat, i, j);
-
-  Mat9ToQuat(tmp, quaternion);
-  if (mat->cols == 4) {
-    cvmSet(&translation_mat, 0, 0, cvmGet(mat, 0, 3));
-    cvmSet(&translation_mat, 1, 0, cvmGet(mat, 1, 3));
-    cvmSet(&translation_mat, 2, 0, cvmGet(mat, 2, 3));
-    cvmSet(&translation_mat, 3, 0, 1);
-  }
-}
-
-void Pose::GetMatrix(CvMat *mat) const
+  double tmp[9];
+  for (int i = 0; i < 3; ++i)
+    for (int j = 0; j < 3; ++j)
+      tmp[i * 3 + j] = mat.at<double>(i, j);
+
+  Mat9ToQuat(tmp, quaternion);
+  if (mat.cols == 4)
+  {
+    translation_mat.at<double>(0, 0) = mat.at<double>(0, 3);
+    translation_mat.at<double>(1, 0) = mat.at<double>(1, 3);
+    translation_mat.at<double>(2, 0) = mat.at<double>(2, 3);
+    translation_mat.at<double>(3, 0) = 1;
+  }
+}
+
+void Pose::GetMatrix(cv::Mat& mat) const
 {
-  if (mat->width == 3) {
-    QuatToMat9(quaternion, mat->data.db);
-  } else if (mat->width == 4) {
-    cvSetIdentity(mat);
-    QuatToMat16(quaternion, mat->data.db);
-    cvmSet(mat, 0, 3, cvmGet(&translation_mat, 0, 0));
-    cvmSet(mat, 1, 3, cvmGet(&translation_mat, 1, 0));
-    cvmSet(mat, 2, 3, cvmGet(&translation_mat, 2, 0));
-  }
+  if (mat.cols == 3)
+  {
+    QuatToMat9(quaternion, mat.ptr<double>(0));
+  }
+  else if (mat.cols == 4)
+  {
+    cv::setIdentity(mat);
+    QuatToMat16(quaternion, mat.ptr<double>(0));
+    mat.at<double>(0, 3) = translation_mat.at<double>(0, 0);
+    mat.at<double>(1, 3) = translation_mat.at<double>(1, 0);
+    mat.at<double>(2, 3) = translation_mat.at<double>(2, 0);
+  }
 }
 
 void Pose::GetMatrixGL(double gl[16], bool mirror)
 {
-  if (mirror) Mirror(false, true, true);
-  CvMat gl_mat = cvMat(4, 4, CV_64F, gl);
-  GetMatrix(&gl_mat);
-  cvTranspose(&gl_mat, &gl_mat);
-  if (mirror) Mirror(false, true, true);
+  if (mirror)
+    Mirror(false, true, true);
+  cv::Mat gl_mat = cv::Mat(4, 4, CV_64F, gl);
+  GetMatrix(gl_mat);
+  // transpose in place so the result lands in the caller's gl buffer;
+  // gl_mat = gl_mat.t() would rebind the header and leave gl untouched
+  cv::transpose(gl_mat, gl_mat);
+  if (mirror)
+    Mirror(false, true, true);
 }
 
 void Pose::SetMatrixGL(double gl[16], bool mirror)
 {
-  double gll[16];
-  memcpy(gll, gl, sizeof(double)*16);
-  CvMat gl_mat = cvMat(4, 4, CV_64F, gll);
-  cvTranspose(&gl_mat, &gl_mat);
-  SetMatrix(&gl_mat);
-  if (mirror) Mirror(false, true, true);
+  double gll[16];
+  memcpy(gll, gl, sizeof(double) * 16);
+  cv::Mat gl_mat = cv::Mat(4, 4, CV_64F, gll);
+  gl_mat = gl_mat.t();
+  SetMatrix(gl_mat);
+  if (mirror)
+    Mirror(false, true, true);
 }
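GetMatrixGL and SetMatrixGL transpose between the row-major cv::Mat layout and OpenGL's column-major convention, so after the transpose the translation sits in elements 12-14. A hypothetical caller, continuing the sketch above:

```
double gl[16];
pose.GetMatrixGL(gl, false);  // no y/z mirroring
// gl[12], gl[13], gl[14] now hold the x, y, z translation.
pose.SetMatrixGL(gl, false);  // round-trips the same values back into the Pose
```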
 void Pose::Transpose()
 {
-  double tmp[16];
-  CvMat tmp_mat = cvMat(4, 4, CV_64F, tmp);
-  GetMatrix(&tmp_mat);
-  cvTranspose(&tmp_mat, &tmp_mat);
-  SetMatrix(&tmp_mat);
+  double tmp[16];
+  cv::Mat tmp_mat = cv::Mat(4, 4, CV_64F, tmp);
+  GetMatrix(tmp_mat);
+  tmp_mat = tmp_mat.t();
+  SetMatrix(tmp_mat);
 }
 
 void Pose::Invert()
 {
-  double tmp[16];
-  CvMat tmp_mat = cvMat(4, 4, CV_64F, tmp);
-  GetMatrix(&tmp_mat);
-  cvInvert(&tmp_mat, &tmp_mat);
-  SetMatrix(&tmp_mat);
+  double tmp[16];
+  cv::Mat tmp_mat = cv::Mat(4, 4, CV_64F, tmp);
+  GetMatrix(tmp_mat);
+  tmp_mat = tmp_mat.inv();
+  SetMatrix(tmp_mat);
 }
 
 void Pose::Mirror(bool x, bool y, bool z)
 {
-  double tmp[16];
-  CvMat tmp_mat = cvMat(4, 4, CV_64F, tmp);
-  GetMatrix(&tmp_mat);
-  MirrorMat(&tmp_mat, x, y, z);
-  SetMatrix(&tmp_mat);
+  double tmp[16];
+  cv::Mat tmp_mat = cv::Mat(4, 4, CV_64F, tmp);
+  GetMatrix(tmp_mat);
+  MirrorMat(tmp_mat, x, y, z);
+  SetMatrix(tmp_mat);
 }
 
-void Pose::SetTranslation(const CvMat *tra) {
-  cvmSet(&translation_mat, 0, 0, cvmGet(tra, 0, 0));
-  cvmSet(&translation_mat, 1, 0, cvmGet(tra, 1, 0));
-  cvmSet(&translation_mat, 2, 0, cvmGet(tra, 2, 0));
-  cvmSet(&translation_mat, 3, 0, 1);
+void Pose::SetTranslation(const cv::Mat& tra)
+{
+  translation_mat.at<double>(0, 0) = tra.at<double>(0, 0);
+  translation_mat.at<double>(1, 0) = tra.at<double>(1, 0);
+  translation_mat.at<double>(2, 0) = tra.at<double>(2, 0);
+  translation_mat.at<double>(3, 0) = 1;
 }
 
-void Pose::SetTranslation(const double *tra) {
-  translation[0] = tra[0];
-  translation[1] = tra[1];
-  translation[2] = tra[2];
-  translation[3] = 1;
+void Pose::SetTranslation(const double* tra)
+{
+  translation[0] = tra[0];
+  translation[1] = tra[1];
+  translation[2] = tra[2];
+  translation[3] = 1;
 }
 
-void Pose::SetTranslation(const double x, const double y, const double z) {
-  translation[0] = x;
-  translation[1] = y;
-  translation[2] = z;
-  translation[3] = 1;
+void Pose::SetTranslation(const double x, const double y, const double z)
+{
+  translation[0] = x;
+  translation[1] = y;
+  translation[2] = z;
+  translation[3] = 1;
 }
 
-void Pose::GetTranslation( CvMat *tra) const{
-  cvmSet(tra, 0, 0, cvmGet(&translation_mat, 0, 0));
-  cvmSet(tra, 1, 0, cvmGet(&translation_mat, 1, 0));
-  cvmSet(tra, 2, 0, cvmGet(&translation_mat, 2, 0));
-  if (tra->rows == 4) cvmSet(tra, 3, 0, 1);
+void Pose::GetTranslation(cv::Mat& tra) const
+{
+  tra.at<double>(0, 0) = translation_mat.at<double>(0, 0);
+  tra.at<double>(1, 0) = translation_mat.at<double>(1, 0);
+  tra.at<double>(2, 0) = translation_mat.at<double>(2, 0);
+  if (tra.rows == 4)
+    tra.at<double>(3, 0) = 1;
 }
 
-Pose& Pose::operator = (const Pose& p)
+Pose& Pose::operator=(const Pose& p)
 {
-  memcpy(quaternion, p.quaternion, 4*sizeof(double));
-  memcpy(translation, p.translation, 4*sizeof(double));
-  return *this;
+  memcpy(quaternion, p.quaternion, 4 * sizeof(double));
+  memcpy(translation, p.translation, 4 * sizeof(double));
+  return *this;
 }
 
-} // namespace alvar
+}  // namespace alvar
diff --git a/ar_track_alvar/src/Ransac.cpp b/ar_track_alvar/src/Ransac.cpp
index bd35e28..ac479f5 100644
--- a/ar_track_alvar/src/Ransac.cpp
+++ b/ar_track_alvar/src/Ransac.cpp
@@ -26,13 +26,14 @@
 #include
 
 #ifdef TRACE
-  #include
+#include
 #endif
 
-namespace alvar {
-
-RansacImpl::RansacImpl(int min_params, int max_params,
-                       int sizeof_param, int sizeof_model) {
+namespace alvar
+{
+RansacImpl::RansacImpl(int min_params, int max_params, int sizeof_param,
+                       int sizeof_model)
+{
   this->min_params = min_params;
   this->max_params = max_params;
   this->sizeof_param = sizeof_param;
@@ -40,112 +41,131 @@ RansacImpl::RansacImpl(int min_params, int max_params,
   indices = NULL;
 
   samples = new void*[max_params];
-  if (!samples) {
+  if (!samples)
+  {
 #ifdef TRACE
-    printf("Cound not allocate memory for %d sample pointers.",
-
max_params); + printf("Cound not allocate memory for %d sample pointers.", max_params); #endif } hypothesis = new char[sizeof_model]; - if (!hypothesis) { + if (!hypothesis) + { #ifdef TRACE printf("Cound not allocate %d bytes of memory for model parameters.", - sizeof_model); + sizeof_model); #endif } } -RansacImpl::~RansacImpl() { - if (samples) delete[] samples; - if (hypothesis) delete[] (char *)hypothesis; - if (indices) delete[] indices; +RansacImpl::~RansacImpl() +{ + if (samples) + delete[] samples; + if (hypothesis) + delete[](char*) hypothesis; + if (indices) + delete[] indices; } -int RansacImpl::_estimate(void* params, int param_c, - int support_limit, int max_rounds, - void* model) { - if (param_c < min_params) return 0; - - int max_support = 0; - - // Randomly search for the best model. - for (int i = 0; - i < max_rounds && max_support < support_limit; - i++) { - // 1. pick a random sample of min_params. - int sample_c; - for (sample_c = 0; sample_c < min_params; sample_c++) { - int r = rand() % (param_c-sample_c); - void* s = (char*)params + r*sizeof_param; - for (int j = 0; j < sample_c; j++) - if (s >= samples[j]) s = (char*)s + sizeof_param; - samples[sample_c] = s; - } - - // 2. create a model from the sampled parameters. - _doEstimate(samples, sample_c, hypothesis); - - // 3. count the support for the model. - int hypo_support = 0; - for (int j = 0; j < param_c; j++) { - if (_doSupports((char*)params + j*sizeof_param, hypothesis)) { - hypo_support++; - } - } +int RansacImpl::_estimate(void* params, int param_c, int support_limit, + int max_rounds, void* model) +{ + if (param_c < min_params) + return 0; + + int max_support = 0; + + // Randomly search for the best model. + for (int i = 0; i < max_rounds && max_support < support_limit; i++) + { + // 1. pick a random sample of min_params. + int sample_c; + for (sample_c = 0; sample_c < min_params; sample_c++) + { + int r = rand() % (param_c - sample_c); + void* s = (char*)params + r * sizeof_param; + for (int j = 0; j < sample_c; j++) + if (s >= samples[j]) + s = (char*)s + sizeof_param; + samples[sample_c] = s; + } + + // 2. create a model from the sampled parameters. + _doEstimate(samples, sample_c, hypothesis); + + // 3. count the support for the model. + int hypo_support = 0; + for (int j = 0; j < param_c; j++) + { + if (_doSupports((char*)params + j * sizeof_param, hypothesis)) + { + hypo_support++; + } + } #ifdef TRACE - printf("Hypothesis got %d support\n", hypo_support); + printf("Hypothesis got %d support\n", hypo_support); #endif - if (hypo_support > max_support) { - max_support = hypo_support; - memcpy(model, hypothesis, sizeof_model); - } - } + if (hypo_support > max_support) + { + max_support = hypo_support; + memcpy(model, hypothesis, sizeof_model); + } + } - return max_support; + return max_support; } -int RansacImpl::_refine(void* params, int param_c, - int support_limit, int max_rounds, - void* model, char *inlier_mask) { - if (param_c < min_params) return 0; +int RansacImpl::_refine(void* params, int param_c, int support_limit, + int max_rounds, void* model, char* inlier_mask) +{ + if (param_c < min_params) + return 0; int max_support = 0; // Iteratively make the model estimation better. - for (int i = 0; - i < max_rounds && max_support < support_limit; - i++) { + for (int i = 0; i < max_rounds && max_support < support_limit; i++) + { int sample_c = 0; // 1. Find all parameters that support the current model. 
- for (int j = 0; j < param_c && sample_c < max_params; j++) { - if (_doSupports((char*)params + j*sizeof_param, model)) { - samples[sample_c++] = (char*)params + j*sizeof_param; - if (inlier_mask) inlier_mask[j] = 1; - } else { - if (inlier_mask) inlier_mask[j] = 0; + for (int j = 0; j < param_c && sample_c < max_params; j++) + { + if (_doSupports((char*)params + j * sizeof_param, model)) + { + samples[sample_c++] = (char*)params + j * sizeof_param; + if (inlier_mask) + inlier_mask[j] = 1; + } + else + { + if (inlier_mask) + inlier_mask[j] = 0; } } #ifdef TRACE printf("Found %d supporting parameters\n", sample_c); #endif - if (sample_c > max_support) { + if (sample_c > max_support) + { // 2. create a model from all supporting parameters. _doEstimate(samples, sample_c, model); max_support = sample_c; - - } else { + } + else + { // until there are no new supporting parameters. break; } } - + return max_support; } /** IndexRansac version */ -RansacImpl::RansacImpl(int min_params, int max_params, int sizeof_model) { +RansacImpl::RansacImpl(int min_params, int max_params, int sizeof_model) +{ this->min_params = min_params; this->max_params = max_params; this->sizeof_param = -1; @@ -153,101 +173,117 @@ RansacImpl::RansacImpl(int min_params, int max_params, int sizeof_model) { samples = NULL; indices = new int[max_params]; hypothesis = new char[sizeof_model]; - if (!hypothesis) { + if (!hypothesis) + { #ifdef TRACE printf("Cound not allocate %d bytes of memory for model parameters.", - sizeof_model); + sizeof_model); #endif } } -int RansacImpl::_estimate(int param_c, - int support_limit, int max_rounds, - void* model) { - if (param_c < min_params) return 0; +int RansacImpl::_estimate(int param_c, int support_limit, int max_rounds, + void* model) +{ + if (param_c < min_params) + return 0; int max_support = 0; // Randomly search for the best model. - for (int i = 0; - i < max_rounds && max_support < support_limit; - i++) { - // 1. pick a random sample of min_params. - int sample_c; - for (sample_c = 0; sample_c < min_params; sample_c++) { - int r = rand() % (param_c-sample_c); - for (int j = 0; j < sample_c; j++) - if (r >= indices[j]) r++; - indices[sample_c] = r; - } + for (int i = 0; i < max_rounds && max_support < support_limit; i++) + { + // 1. pick a random sample of min_params. + int sample_c; + for (sample_c = 0; sample_c < min_params; sample_c++) + { + int r = rand() % (param_c - sample_c); + for (int j = 0; j < sample_c; j++) + if (r >= indices[j]) + r++; + indices[sample_c] = r; + } - // 2. create a model from the sampled parameters. - _doEstimate(indices, sample_c, hypothesis); + // 2. create a model from the sampled parameters. + _doEstimate(indices, sample_c, hypothesis); - // 3. count the support for the model. - int hypo_support = 0; - for (int j = 0; j < param_c; j++) { - if (_doSupports(j, hypothesis)) { - hypo_support++; - } + // 3. count the support for the model. 
+ int hypo_support = 0; + for (int j = 0; j < param_c; j++) + { + if (_doSupports(j, hypothesis)) + { + hypo_support++; } + } #ifdef TRACE - printf("Hypothesis got %d support\n", hypo_support); + printf("Hypothesis got %d support\n", hypo_support); #endif - if (hypo_support > max_support) { - max_support = hypo_support; - memcpy(model, hypothesis, sizeof_model); - } + if (hypo_support > max_support) + { + max_support = hypo_support; + memcpy(model, hypothesis, sizeof_model); + } } return max_support; } -int RansacImpl::_refine(int param_c, - int support_limit, int max_rounds, - void* model, char *inlier_mask) { - if (param_c < min_params) return 0; +int RansacImpl::_refine(int param_c, int support_limit, int max_rounds, + void* model, char* inlier_mask) +{ + if (param_c < min_params) + return 0; int max_support = 0; // Iteratively make the model estimation better. - for (int i = 0; - i < max_rounds && max_support < support_limit; - i++) { + for (int i = 0; i < max_rounds && max_support < support_limit; i++) + { int sample_c = 0; // 1. Find all parameters that support the current model. - for (int j = 0; j < param_c && sample_c < max_params; j++) { - if (_doSupports(j, model)) { - indices[sample_c++] = j; - if (inlier_mask) inlier_mask[j] = 1; - } else { - if (inlier_mask) inlier_mask[j] = 0; + for (int j = 0; j < param_c && sample_c < max_params; j++) + { + if (_doSupports(j, model)) + { + indices[sample_c++] = j; + if (inlier_mask) + inlier_mask[j] = 1; + } + else + { + if (inlier_mask) + inlier_mask[j] = 0; } } #ifdef TRACE printf("Found %d supporting parameters\n", sample_c); #endif - if (sample_c < min_params) break; // indicates too few points. - if (sample_c > max_support) { + if (sample_c < min_params) + break; // indicates too few points. + if (sample_c > max_support) + { // 2. create a model from all supporting parameters. _doEstimate(indices, sample_c, model); max_support = sample_c; - - } else { + } + else + { // until there are no new supporting parameters. 
break; } } - + return max_support; } /** public methods */ int RansacImpl::estimateRequiredRounds(float success_propability, - float inlier_percentage) { - return (int) - (log(1-success_propability) / log(1-pow(inlier_percentage,3))); + float inlier_percentage) +{ + return (int)(log(1 - success_propability) / + log(1 - pow(inlier_percentage, 3))); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Rotation.cpp b/ar_track_alvar/src/Rotation.cpp index bc96009..3e89b4e 100644 --- a/ar_track_alvar/src/Rotation.cpp +++ b/ar_track_alvar/src/Rotation.cpp @@ -23,363 +23,375 @@ #include "ar_track_alvar/Alvar.h" #include "ar_track_alvar/Rotation.h" +#include using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; Rotation::Rotation() { - cvInitMatHeader(&quaternion_mat, 4, 1, CV_64F, quaternion); - Reset(); + quaternion_mat = cv::Mat(4, 1, CV_64F, quaternion); + Reset(); } -Rotation::Rotation(const Rotation& r) { - cvInitMatHeader(&quaternion_mat, 4, 1, CV_64F, quaternion); - cvCopy(&r.quaternion_mat, &quaternion_mat); +Rotation::Rotation(const Rotation& r) +{ + quaternion_mat = cv::Mat(4, 1, CV_64F); + r.quaternion_mat.copyTo(quaternion_mat); } -Rotation::Rotation(CvMat *data, RotationType t) +Rotation::Rotation(const cv::Mat& data, RotationType t) { - cvInitMatHeader(&quaternion_mat, 4, 1, CV_64F, quaternion); - - Reset(); - - switch (t) - { - case QUAT : - SetQuaternion(data); - break; - case MAT : - SetMatrix(data); - break; - case EUL : - SetEuler(data); - break; - case ROD : - SetRodriques(data); - break; - } + quaternion_mat = cv::Mat(4, 1, CV_64F, quaternion); + + Reset(); + + switch (t) + { + case QUAT: + SetQuaternion(data); + break; + case MAT: + SetMatrix(data); + break; + case EUL: + SetEuler(data); + break; + case ROD: + SetRodriques(data); + break; + } } void Rotation::Transpose() { - double tmp[9]; - CvMat tmp_mat = cvMat(3, 3, CV_64F, tmp); - GetMatrix(&tmp_mat); - cvTranspose(&tmp_mat, &tmp_mat); - SetMatrix(&tmp_mat); + double tmp[9]; + cv::Mat tmp_mat = cv::Mat(3, 3, CV_64F, tmp); + GetMatrix(tmp_mat); + cv::transpose(tmp_mat, tmp_mat); + SetMatrix(tmp_mat); } - -void Rotation::MirrorMat(CvMat *mat, bool x, bool y, bool z) { - CvMat *mat_mul = cvCloneMat(mat); - cvSetIdentity(mat_mul); - if (x) cvmSet(mat_mul, 0, 0, -1); - if (y) cvmSet(mat_mul, 1, 1, -1); - if (z) cvmSet(mat_mul, 2, 2, -1); - cvMatMul(mat_mul, mat, mat); - cvReleaseMat(&mat_mul); + +void Rotation::MirrorMat(cv::Mat& mat, bool x, bool y, bool z) +{ + cv::Mat mat_mul = mat.clone(); + cv::setIdentity(mat_mul); + if (x) + mat_mul.at(0, 0) = -1; + if (y) + mat_mul.at(1, 1) = -1; + if (z) + mat_mul.at(2, 2) = -1; + mat = mat_mul * mat; + mat_mul.release(); } - + void Rotation::Mirror(bool x, bool y, bool z) { - double tmp[9]; - CvMat tmp_mat = cvMat(3, 3, CV_64F, tmp); - GetMatrix(&tmp_mat); - MirrorMat(&tmp_mat, x, y, z); - SetMatrix(&tmp_mat); + double tmp[9]; + cv::Mat tmp_mat = cv::Mat(3, 3, CV_64F, tmp); + GetMatrix(tmp_mat); + MirrorMat(tmp_mat, x, y, z); + SetMatrix(tmp_mat); } void Rotation::Reset() { - cvZero(&quaternion_mat); cvmSet(&quaternion_mat, 0, 0, 1); + quaternion_mat.setTo(cv::Scalar::all(0)); + quaternion_mat.at(0, 0) = 1; } -void Rotation::Mat9ToRod(double *mat, double *rod) +void Rotation::Mat9ToRod(double* mat, double* rod) { - CvMat mat_m, rod_m; - cvInitMatHeader(&mat_m, 3, 3, CV_64F, mat); - cvInitMatHeader(&rod_m, 3, 1, CV_64F, rod); - cvRodrigues2(&mat_m, &rod_m); + cv::Mat mat_m, rod_m; + mat_m = cv::Mat(3, 3, CV_64F, mat); + rod_m = 
cv::Mat(3, 1, CV_64F, rod);
+  cv::Rodrigues(mat_m, rod_m);
 }
 
-void Rotation::RodToMat9(double *rod, double *mat)
+void Rotation::RodToMat9(const double* rod, double* mat)
 {
-  CvMat mat_m, rod_m;
-  cvInitMatHeader(&mat_m, 3, 3, CV_64F, mat);
-  cvInitMatHeader(&rod_m, 3, 1, CV_64F, rod);
-  cvRodrigues2(&rod_m, &mat_m, 0);
+  cv::Mat mat_m, rod_m;
+  mat_m = cv::Mat(3, 3, CV_64F, mat);
+  rod_m = cv::Mat(3, 1, CV_64F, const_cast<double*>(rod));
+  cv::Rodrigues(rod_m, mat_m);
 }
 
-void Rotation::QuatInv(const double *q, double *qi)
+void Rotation::QuatInv(const double* q, double* qi)
 {
-  qi[0] = q[0];
-  qi[1] = -1*q[1];
-  qi[2] = -1*q[2];
-  qi[3] = -1*q[3];
+  qi[0] = q[0];
+  qi[1] = -1 * q[1];
+  qi[2] = -1 * q[2];
+  qi[3] = -1 * q[3];
 }
 
-void Rotation::QuatNorm(double *q)
+void Rotation::QuatNorm(double* q)
 {
-  double l = sqrt(q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3] );
-
-  if(l != 0)
-    for(unsigned i = 0; i < 4; ++i)
-      q[i] = q[i] / l;
+  double l = sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]);
+
+  if (l != 0)
+    for (unsigned i = 0; i < 4; ++i)
+      q[i] = q[i] / l;
 }
 
-void Rotation::QuatMul(const double *q1, const double *q2, double *q3)
+void Rotation::QuatMul(const double* q1, const double* q2, double* q3)
 {
-  double w1 = q1[0];
-  double x1 = q1[1];
-  double y1 = q1[2];
-  double z1 = q1[3];
-
-  double w2 = q2[0];
-  double x2 = q2[1];
-  double y2 = q2[2];
-  double z2 = q2[3];
-
-  q3[0] = w1*w2 - x1*x2 - y1*y2 - z1*z2;
-  q3[1] = w1*x2 + x1*w2 + y1*z2 - z1*y2;
-  q3[2] = w1*y2 + y1*w2 + z1*x2 - x1*z2;
-  q3[3] = w1*z2 + z1*w2 + x1*y2 - y1*x2;
-
-  QuatNorm(q3);
+  double w1 = q1[0];
+  double x1 = q1[1];
+  double y1 = q1[2];
+  double z1 = q1[3];
+
+  double w2 = q2[0];
+  double x2 = q2[1];
+  double y2 = q2[2];
+  double z2 = q2[3];
+
+  q3[0] = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2;
+  q3[1] = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2;
+  q3[2] = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2;
+  q3[3] = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2;
+
+  QuatNorm(q3);
 }
 
-void Rotation::QuatToMat9(const double *quat, double *mat)
+void Rotation::QuatToMat9(const double* quat, double* mat)
 {
-  double W = quat[0];
-  double X = quat[1];
-  double Y = quat[2];
-  double Z = quat[3];
-
-  double xx = X * X;
-  double xy = X * Y;
-  double xz = X * Z;
-  double xw = X * W;
-
-  double yy = Y * Y;
-  double yz = Y * Z;
-  double yw = Y * W;
-
-  double zz = Z * Z;
-  double zw = Z * W;
-
-  mat[0] = 1 - 2 * ( yy + zz ); //(0,0)
-  mat[1] = 2 * ( xy - zw ); //(0,1)
-  mat[2] = 2 * ( xz + yw ); //(0,2)
-
-  mat[3] = 2 * ( xy + zw ); //(1,0)
-  mat[4] = 1 - 2 * ( xx + zz ); //(1,1)
-  mat[5] = 2 * ( yz - xw ); //(1,2)
-
-  mat[6] = 2 * ( xz - yw ); //(2,0)
-  mat[7] = 2 * ( yz + xw ); //(2,1)
-  mat[8] = 1 - 2 * ( xx + yy ); //(2,2)
+  double W = quat[0];
+  double X = quat[1];
+  double Y = quat[2];
+  double Z = quat[3];
+
+  double xx = X * X;
+  double xy = X * Y;
+  double xz = X * Z;
+  double xw = X * W;
+
+  double yy = Y * Y;
+  double yz = Y * Z;
+  double yw = Y * W;
+
+  double zz = Z * Z;
+  double zw = Z * W;
+
+  mat[0] = 1 - 2 * (yy + zz);  //(0,0)
+  mat[1] = 2 * (xy - zw);      //(0,1)
+  mat[2] = 2 * (xz + yw);      //(0,2)
+
+  mat[3] = 2 * (xy + zw);      //(1,0)
+  mat[4] = 1 - 2 * (xx + zz);  //(1,1)
+  mat[5] = 2 * (yz - xw);      //(1,2)
+
+  mat[6] = 2 * (xz - yw);      //(2,0)
+  mat[7] = 2 * (yz + xw);      //(2,1)
+  mat[8] = 1 - 2 * (xx + yy);  //(2,2)
 }
 
-// TODO: Now we don't want to eliminate the translation part from the matrix here. Did this change break something???
-void Rotation::QuatToMat16(const double *quat, double *mat) +// TODO: Now we don't want to eliminate the translation part from the matrix +// here. Did this change break something??? +void Rotation::QuatToMat16(const double* quat, double* mat) { - //memset(mat, 0, 16*sizeof(double)); + // memset(mat, 0, 16*sizeof(double)); - double W = quat[0]; - double X = quat[1]; - double Y = quat[2]; - double Z = quat[3]; + double W = quat[0]; + double X = quat[1]; + double Y = quat[2]; + double Z = quat[3]; - double xx = X * X; - double xy = X * Y; - double xz = X * Z; - double xw = X * W; + double xx = X * X; + double xy = X * Y; + double xz = X * Z; + double xw = X * W; - double yy = Y * Y; - double yz = Y * Z; - double yw = Y * W; + double yy = Y * Y; + double yz = Y * Z; + double yw = Y * W; - double zz = Z * Z; - double zw = Z * W; + double zz = Z * Z; + double zw = Z * W; - mat[0] = 1 - 2 * ( yy + zz ); //(0,0) - mat[1] = 2 * ( xy - zw ); //(0,1) - mat[2] = 2 * ( xz + yw ); //(0,2) + mat[0] = 1 - 2 * (yy + zz); //(0,0) + mat[1] = 2 * (xy - zw); //(0,1) + mat[2] = 2 * (xz + yw); //(0,2) - mat[4] = 2 * ( xy + zw ); //(1,0) - mat[5] = 1 - 2 * ( xx + zz ); //(1,1) - mat[6] = 2 * ( yz - xw ); //(1,2) + mat[4] = 2 * (xy + zw); //(1,0) + mat[5] = 1 - 2 * (xx + zz); //(1,1) + mat[6] = 2 * (yz - xw); //(1,2) - mat[8] = 2 * ( xz - yw ); //(2,0) - mat[9] = 2 * ( yz + xw ); //(2,1) - mat[10] = 1 - 2 * ( xx + yy ); //(2,2) + mat[8] = 2 * (xz - yw); //(2,0) + mat[9] = 2 * (yz + xw); //(2,1) + mat[10] = 1 - 2 * (xx + yy); //(2,2) - //mat[15] = 1; + // mat[15] = 1; } -void Rotation::QuatToEul(const double *q, double *eul) +void Rotation::QuatToEul(const double* q, double* eul) { - double qw = q[0]; - double qx = q[1]; - double qy = q[2]; - double qz = q[3]; - - double heading = 0, bank = 0, attitude = 0; - - if ((2*qx*qy + 2*qz*qw) == 1.0) - { - heading = 2 * atan2(qx,qw); - bank = 0; - } - else if ((2*qx*qy + 2*qz*qw) == -1.0) - { - heading = -2 * atan2(qx,qw); - bank = 0; - } - else - { - heading = atan2(2*qy*qw-2*qx*qz , 1 - 2*qy*qy - 2*qz*qz); - bank = atan2(2*qx*qw-2*qy*qz , 1 - 2*qx*qx - 2*qz*qz); - } - - attitude = asin(2*qx*qy + 2*qz*qw); - - heading = 180.0 * heading / PI; - attitude = 180.0 * attitude / PI; - bank = 180.0 * bank / PI; - - eul[0] = heading; - eul[1] = attitude; - eul[2] = bank; + double qw = q[0]; + double qx = q[1]; + double qy = q[2]; + double qz = q[3]; + + double heading = 0, bank = 0, attitude = 0; + + if ((2 * qx * qy + 2 * qz * qw) == 1.0) + { + heading = 2 * atan2(qx, qw); + bank = 0; + } + else if ((2 * qx * qy + 2 * qz * qw) == -1.0) + { + heading = -2 * atan2(qx, qw); + bank = 0; + } + else + { + heading = atan2(2 * qy * qw - 2 * qx * qz, 1 - 2 * qy * qy - 2 * qz * qz); + bank = atan2(2 * qx * qw - 2 * qy * qz, 1 - 2 * qx * qx - 2 * qz * qz); + } + + attitude = asin(2 * qx * qy + 2 * qz * qw); + + heading = 180.0 * heading / PI; + attitude = 180.0 * attitude / PI; + bank = 180.0 * bank / PI; + + eul[0] = heading; + eul[1] = attitude; + eul[2] = bank; } -void Rotation::Mat9ToQuat(const double *mat, double *quat) +void Rotation::Mat9ToQuat(const double* mat, double* quat) { - quat[0] = sqrt(max(0., 1 + mat[0] + mat[4] + mat[8])) / 2.0; // w - quat[1] = sqrt(max(0., 1 + mat[0] - mat[4] - mat[8])) / 2.0; // x - quat[2] = sqrt(max(0., 1 - mat[0] + mat[4] - mat[8])) / 2.0; // y - quat[3] = sqrt(max(0., 1 - mat[0] - mat[4] + mat[8])) / 2.0; // z + quat[0] = sqrt(max(0., 1 + mat[0] + mat[4] + mat[8])) / 2.0; // w + quat[1] = sqrt(max(0., 1 + mat[0] - mat[4] - mat[8])) / 2.0; // x + 
quat[2] = sqrt(max(0., 1 - mat[0] + mat[4] - mat[8])) / 2.0;  // y
+  quat[3] = sqrt(max(0., 1 - mat[0] - mat[4] + mat[8])) / 2.0;  // z
 
-  quat[1] = quat[1]*Sign(mat[7] - mat[5]); // x
-  quat[2] = quat[2]*Sign(mat[2] - mat[6]); // y
-  quat[3] = quat[3]*Sign(mat[3] - mat[1]); // z
+  quat[1] = quat[1] * Sign(mat[7] - mat[5]);  // x
+  quat[2] = quat[2] * Sign(mat[2] - mat[6]);  // y
+  quat[3] = quat[3] * Sign(mat[3] - mat[1]);  // z
 
-  QuatNorm(quat);
+  QuatNorm(quat);
 }
 
-void Rotation::EulToQuat(const double *eul, double *quat)
+void Rotation::EulToQuat(const double* eul, double* quat)
 {
-  double heading = PI*eul[0]/180.0;
-  double attitude = PI*eul[1]/180.0;
-  double bank = PI*eul[2]/180.0;
-
-  double c1 = cos(heading/2.0);
-  double s1 = sin(heading/2.0);
-  double c2 = cos(attitude/2.0);
-  double s2 = sin(attitude/2.0);
-  double c3 = cos(bank/2.0);
-  double s3 = sin(bank/2.0);
-  double c1c2 = c1*c2;
-  double s1s2 = s1*s2;
-
-  quat[0] = c1c2*c3 - s1s2*s3;
-  quat[1] = c1c2*s3 + s1s2*c3;
-  quat[2] = s1*c2*c3 + c1*s2*s3;
-  quat[3] = c1*s2*c3 - s1*c2*s3;
-
-  QuatNorm(quat);
+  double heading = PI * eul[0] / 180.0;
+  double attitude = PI * eul[1] / 180.0;
+  double bank = PI * eul[2] / 180.0;
+
+  double c1 = cos(heading / 2.0);
+  double s1 = sin(heading / 2.0);
+  double c2 = cos(attitude / 2.0);
+  double s2 = sin(attitude / 2.0);
+  double c3 = cos(bank / 2.0);
+  double s3 = sin(bank / 2.0);
+  double c1c2 = c1 * c2;
+  double s1s2 = s1 * s2;
+
+  quat[0] = c1c2 * c3 - s1s2 * s3;
+  quat[1] = c1c2 * s3 + s1s2 * c3;
+  quat[2] = s1 * c2 * c3 + c1 * s2 * s3;
+  quat[3] = c1 * s2 * c3 - s1 * c2 * s3;
+
+  QuatNorm(quat);
 }
 
-void Rotation::SetQuaternion(CvMat *mat)
+void Rotation::SetQuaternion(const cv::Mat& mat)
 {
-  cvCopy(mat, &quaternion_mat);
-  QuatNorm(quaternion);
+  mat.copyTo(quaternion_mat);
+  QuatNorm(quaternion);
 }
 
-void Rotation::SetQuaternion(const double *quat)
+void Rotation::SetQuaternion(const double* quat)
 {
-  quaternion[0] = quat[0];
-  quaternion[1] = quat[1];
-  quaternion[2] = quat[2];
-  quaternion[3] = quat[3];
-  QuatNorm(quaternion);
+  quaternion[0] = quat[0];
+  quaternion[1] = quat[1];
+  quaternion[2] = quat[2];
+  quaternion[3] = quat[3];
+  QuatNorm(quaternion);
 }
 
-void Rotation::SetEuler(const CvMat *mat)
+void Rotation::SetEuler(const cv::Mat& mat)
 {
-  EulToQuat(mat->data.db, quaternion);
+  EulToQuat(mat.ptr<double>(0), quaternion);
 }
 
-void Rotation::SetRodriques(const CvMat *mat)
+void Rotation::SetRodriques(const cv::Mat& mat)
 {
-  double tmp[9];
-  RodToMat9(mat->data.db, tmp);
-  Mat9ToQuat(tmp, quaternion);
+  double tmp[9];
+  RodToMat9(mat.ptr<double>(0), tmp);
+  Mat9ToQuat(tmp, quaternion);
 }
 
-void Rotation::SetMatrix(const CvMat *mat)
+void Rotation::SetMatrix(const cv::Mat& mat)
 {
-  double tmp[9];
-  for(int i = 0; i < 3; ++i)
-    for(int j = 0; j < 3; ++j)
-      tmp[i*3+j] = cvmGet(mat, i, j);
+  double tmp[9];
+  for (int i = 0; i < 3; ++i)
+    for (int j = 0; j < 3; ++j)
+      tmp[i * 3 + j] = mat.at<double>(i, j);
 
-  Mat9ToQuat(tmp, quaternion);
+  Mat9ToQuat(tmp, quaternion);
 }
 
-void Rotation::GetMatrix(CvMat *mat) const
+void Rotation::GetMatrix(cv::Mat& mat) const
 {
-  if (mat->width == 3) {
-    QuatToMat9(quaternion, mat->data.db);
-  } else if (mat->width == 4) {
-    cvSetIdentity(mat);
-    QuatToMat16(quaternion, mat->data.db);
-  }
+  if (mat.cols == 3)
+  {
+    QuatToMat9(quaternion, mat.ptr<double>(0));
+  }
+  else if (mat.cols == 4)
+  {
+    cv::setIdentity(mat);
+    QuatToMat16(quaternion, mat.ptr<double>(0));
+  }
 }
 
-void Rotation::GetRodriques(CvMat *mat) const
+void Rotation::GetRodriques(cv::Mat& mat) const
 {
-  double tmp[9];
-  QuatToMat9(quaternion, tmp);
-  Mat9ToRod(tmp, mat->data.db);
+  double tmp[9];
+  QuatToMat9(quaternion, tmp);
+  Mat9ToRod(tmp, mat.ptr<double>(0));
 }
 
-void Rotation::GetEuler(CvMat *mat) const
+void Rotation::GetEuler(cv::Mat& mat) const
 {
-  QuatToEul(quaternion, mat->data.db);
+  QuatToEul(quaternion, mat.ptr<double>(0));
 }
 
-void Rotation::GetQuaternion(CvMat *mat) const
+void Rotation::GetQuaternion(cv::Mat& mat) const
 {
-  cvCopy(&quaternion_mat, mat);
+  quaternion_mat.copyTo(mat);
 }
 
 // TODO: This is not needed???
-inline Rotation& Rotation::operator = (const Rotation& r)
+inline Rotation& Rotation::operator=(const Rotation& r)
 {
-  memcpy(quaternion, r.quaternion, 4*sizeof(double));
-  return *this;
+  memcpy(quaternion, r.quaternion, 4 * sizeof(double));
+  return *this;
 }
 
-inline Rotation& Rotation::operator += (const Rotation& r)
+inline Rotation& Rotation::operator+=(const Rotation& r)
 {
-  Rotation res;
-  QuatMul(quaternion, r.quaternion, res.quaternion);
-  memcpy(quaternion, res.quaternion, 4*sizeof(double));
-  //x += v.x;
-  //y += v.y;
-  //z += v.z;
-
-  return *this;
+  Rotation res;
+  QuatMul(quaternion, r.quaternion, res.quaternion);
+  memcpy(quaternion, res.quaternion, 4 * sizeof(double));
+  // x += v.x;
+  // y += v.y;
+  // z += v.z;
+
+  return *this;
 }
 
-inline Rotation operator + (const Rotation& r1, const Rotation& r2)
+inline Rotation operator+(const Rotation& r1, const Rotation& r2)
 {
-  Rotation ret = r1;
-  ret += r2;
-
-  return ret;
+  Rotation ret = r1;
+  ret += r2;
+
+  return ret;
 }
 
-} // namespace alvar
+}  // namespace alvar
diff --git a/ar_track_alvar/src/SampleCamCalib.cpp b/ar_track_alvar/src/SampleCamCalib.cpp
index 5b76e11..d76f0f3 100644
--- a/ar_track_alvar/src/SampleCamCalib.cpp
+++ b/ar_track_alvar/src/SampleCamCalib.cpp
@@ -5,182 +5,230 @@
 using namespace alvar;
 using namespace std;
 
-const int calib_count_max=50;
-const int etalon_rows=6;
-const int etalon_columns=8;
+const int calib_count_max = 50;
+const int etalon_rows = 6;
+const int etalon_columns = 8;
 std::stringstream calibrationFilename;
 
-void videocallback(IplImage *image)
+void videocallback(IplImage* image)
 {
-  static bool calibrated = false;
-  static int calib_count=0;
-  static Camera cam;
-  static ProjPoints pp;
-  static int64 prev_tick=0;
-  static bool initialized = false;
-
-  if (!initialized) {
-    cam.SetRes(image->width, image->height);
-    prev_tick = cvGetTickCount();
-    initialized = true;
+  static bool calibrated = false;
+  static int calib_count = 0;
+  static Camera cam;
+  static ProjPoints pp;
+  static int64 prev_tick = 0;
+  static bool initialized = false;
+
+  if (!initialized)
+  {
+    cam.SetRes(image->width, image->height);
+    prev_tick = cvGetTickCount();
+    initialized = true;
+  }
+
+  bool flip_image = (image->origin ? true : false);
+  if (flip_image)
+  {
+    cvFlip(image);
+    image->origin = !image->origin;
+  }
+
+  assert(image);
+  if (!calibrated)
+  {
+    // If we have already collected enough data to make the calibration
+    // - We are ready to end the capture loop
+    // - Calibrate
+    // - Save the calibration file
+    if (calib_count >= calib_count_max)
+    {
+      std::cout << "Calibrating..."
<< endl; + calib_count = 0; + cam.Calibrate(pp); + pp.Reset(); + cam.SaveCalib(calibrationFilename.str().c_str()); + std::cout << "Saving calibration: " << calibrationFilename.str() << endl; + calibrated = true; } - - bool flip_image = (image->origin?true:false); - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; - } - - assert(image); - if (!calibrated) { - // If we have already collected enough data to make the calibration - // - We are ready to end the capture loop - // - Calibrate - // - Save the calibration file - if (calib_count >= calib_count_max) { - std::cout<<"Calibrating..."< (cvGetTickFrequency() * 1000 * 1000 * 1.5)) { - if (pp.AddPointsUsingChessboard(image, 2.8, etalon_rows, etalon_columns, true)) { - prev_tick = tick; - calib_count++; - cout< (cvGetTickFrequency() * 1000 * 1000 * 1.5)) + { + if (pp.AddPointsUsingChessboard(image, 2.8, etalon_rows, etalon_columns, + true)) + { + prev_tick = tick; + calib_count++; + cout << calib_count << "/" << calib_count_max << endl; } + } } - - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; + } + else + { + if (pp.AddPointsUsingChessboard(image, 2.5, etalon_rows, etalon_columns, + true)) + { + Pose pose; + cam.CalcExteriorOrientation(pp.object_points, pp.image_points, &pose); + cam.ProjectPoints(pp.object_points, &pose, pp.image_points); + for (size_t i = 0; i < pp.image_points.size(); i++) + { + cvCircle(image, + cvPoint((int)pp.image_points[i].x, (int)pp.image_points[i].y), + 6, CV_RGB(0, 0, 255)); + } + pp.Reset(); } + } + + if (flip_image) + { + cvFlip(image); + image->origin = !image->origin; + } } -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SampleCamCalib" << std::endl; - std::cout << "==============" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This is an example of how to use the 'Camera' and 'ProjPoints' classes" << std::endl; - std::cout << " to perform camera calibration. Point the camera to the chessboard" << std::endl; - std::cout << " calibration pattern (see ALVAR.pdf) from several directions until 50" << std::endl; - std::cout << " calibration images are collected. A 'calib.xml' file that contains the" << std::endl; - std::cout << " internal parameters of the camera is generated and can be used by other" << std::endl; - std::cout << " applications that require a calibrated camera." << std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " [device]" << std::endl; - std::cout << std::endl; - std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl; - std::cout << " highgui capture devices are prefered" << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; - - // Initialise CvTestbed - CvTestbed::Instance().SetVideoCallback(videocallback); - - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." 
<< std::endl; - return 0; - } - - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleCamCalib" << std::endl; + std::cout << "==============" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to use the 'Camera' and " + "'ProjPoints' classes" + << std::endl; + std::cout << " to perform camera calibration. Point the camera to the " + "chessboard" + << std::endl; + std::cout << " calibration pattern (see ALVAR.pdf) from several " + "directions until 50" + << std::endl; + std::cout << " calibration images are collected. A 'calib.xml' file that " + "contains the" + << std::endl; + std::cout << " internal parameters of the camera is generated and can be " + "used by other" + << std::endl; + std::cout << " applications that require a calibrated camera." + << std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << " [device]" << std::endl; + std::cout << std::endl; + std::cout << " device integer selecting device from enumeration list " + "(default 0)" + << std::endl; + std::cout << " highgui capture devices are prefered" + << std::endl; + std::cout << std::endl; + std::cout << "Keyboard Shortcuts:" << std::endl; + std::cout << " q: quit" << std::endl; + std::cout << std::endl; + + // Initialise CvTestbed + CvTestbed::Instance().SetVideoCallback(videocallback); + + // Enumerate possible capture plugins + CaptureFactory::CapturePluginVector plugins = + CaptureFactory::instance()->enumeratePlugins(); + if (plugins.size() < 1) + { + std::cout << "Could not find any capture plugins." << std::endl; + return 0; + } - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." << std::endl; - return 0; - } + // Display capture plugins + std::cout << "Available Plugins: "; + outputEnumeratedPlugins(plugins); + std::cout << std::endl; + + // Enumerate possible capture devices + CaptureFactory::CaptureDeviceVector devices = + CaptureFactory::instance()->enumerateDevices(); + if (devices.size() < 1) + { + std::cout << "Could not find any capture devices." 
<< std::endl; + return 0; + } - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleCamCalib (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." << std::endl; - } + // Check command line argument for which device to use + int selectedDevice = defaultDevice(devices); + if (argc > 1) + { + selectedDevice = atoi(argv[1]); + } + if (selectedDevice >= (int)devices.size()) + { + selectedDevice = defaultDevice(devices); + } - return 0; + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; + + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); + + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; + + cap->start(); + cap->setResolution(640, 480); + + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleCamCalib (" << cap->captureDevice().captureType() << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) 
+ { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleCvTestbed.cpp b/ar_track_alvar/src/SampleCvTestbed.cpp index f915dce..c88c7b7 100644 --- a/ar_track_alvar/src/SampleCvTestbed.cpp +++ b/ar_track_alvar/src/SampleCvTestbed.cpp @@ -3,128 +3,163 @@ using namespace alvar; using namespace std; -void videocallback(IplImage *image) +void videocallback(IplImage* image) { - static IplImage *img_gray=NULL; + static IplImage* img_gray = NULL; + + assert(image); + if (img_gray == NULL) + { + // Following image is toggled visible using key '0' + img_gray = + CvTestbed::Instance().CreateImageWithProto("Grayscale", image, 0, 1); + } + if (image->nChannels > 1) + { + cvCvtColor(image, img_gray, CV_RGB2GRAY); + } + else + { + cvCopy(image, img_gray); + } + // TODO: Do your image operations +} - assert(image); - if (img_gray == NULL) { - // Following image is toggled visible using key '0' - img_gray = CvTestbed::Instance().CreateImageWithProto("Grayscale", image, 0, 1); +int main(int argc, char* argv[]) +{ + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleCvTestbed" << std::endl; + std::cout << "===============" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to use the 'CvTestbed', " + "'CaptureFactory' and" + << std::endl; + std::cout << " 'Capture' classes. The 'CaptureFactory' can create " + "'Capture' objects" + << std::endl; + std::cout << " from many different backends (see SampleCvTestbed.cpp). " + "You can also" + << std::endl; + std::cout << " show/hide the 1st ten images created using 'CvTestbed' " + "using the number" + << std::endl; + std::cout << " keys. In this example you can use key '0' to show/hide a " + "grayscale" + << std::endl; + std::cout << " version of the captured image." << std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << " [device]" << std::endl; + std::cout << std::endl; + std::cout << " device integer selecting device from enumeration list " + "(default 0)" + << std::endl; + std::cout << " highgui capture devices are prefered" + << std::endl; + std::cout << std::endl; + std::cout << "Keyboard Shortcuts:" << std::endl; + std::cout << " 0: show/hide grayscale image" << std::endl; + std::cout << " q: quit" << std::endl; + std::cout << std::endl; + + // Initialise CvTestbed + CvTestbed::Instance().SetVideoCallback(videocallback); + + // Enumerate possible capture plugins + CaptureFactory::CapturePluginVector plugins = + CaptureFactory::instance()->enumeratePlugins(); + if (plugins.size() < 1) + { + std::cout << "Could not find any capture plugins." << std::endl; + return 0; } - if (image->nChannels > 1) { - cvCvtColor(image, img_gray, CV_RGB2GRAY); - } else { - cvCopy(image, img_gray); + + // Display capture plugins + std::cout << "Available Plugins: "; + outputEnumeratedPlugins(plugins); + std::cout << std::endl; + + // Enumerate possible capture devices + CaptureFactory::CaptureDeviceVector devices = + CaptureFactory::instance()->enumerateDevices(); + if (devices.size() < 1) + { + std::cout << "Could not find any capture devices." 
<< std::endl; + return 0; } - // TODO: Do your image operations -} -int main(int argc, char *argv[]) -{ - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SampleCvTestbed" << std::endl; - std::cout << "===============" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This is an example of how to use the 'CvTestbed', 'CaptureFactory' and" << std::endl; - std::cout << " 'Capture' classes. The 'CaptureFactory' can create 'Capture' objects" << std::endl; - std::cout << " from many different backends (see SampleCvTestbed.cpp). You can also" << std::endl; - std::cout << " show/hide the 1st ten images created using 'CvTestbed' using the number" << std::endl; - std::cout << " keys. In this example you can use key '0' to show/hide a grayscale" << std::endl; - std::cout << " version of the captured image." << std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " [device]" << std::endl; - std::cout << std::endl; - std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl; - std::cout << " highgui capture devices are prefered" << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " 0: show/hide grayscale image" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; - - // Initialise CvTestbed - CvTestbed::Instance().SetVideoCallback(videocallback); - - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." << std::endl; - return 0; - } - - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; - - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." 
<< std::endl; - return 0; - } - - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleCvTestbed (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." << std::endl; - } - - return 0; + // Check command line argument for which device to use + int selectedDevice = defaultDevice(devices); + if (argc > 1) + { + selectedDevice = atoi(argv[1]); } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + if (selectedDevice >= (int)devices.size()) + { + selectedDevice = defaultDevice(devices); } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; + + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); + + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + + cap->start(); + cap->setResolution(640, 480); + + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleCvTestbed (" << cap->captureDevice().captureType() << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; + } + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; + } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) 
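Each sample in this patch wraps `CvTestbed::Instance().StartVideo` in the same capture lifecycle. Condensed into one sketch, using only ALVAR calls and variables that appear verbatim in the hunks above (error output and the fallback path omitted):

```
// Shared capture lifecycle of the samples, condensed for reference.
Capture* cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]);
if (cap)
{
  cap->start();                               // open the device
  cap->setResolution(640, 480);
  cap->loadSettings(settingsFilename.str());  // optional; not all plugins support it
  CvTestbed::Instance().StartVideo(cap, title.str().c_str());  // runs until 'q'
  cap->saveSettings(settingsFilename.str());  // optional, mirrors loadSettings
  cap->stop();
  delete cap;                                 // the caller owns the Capture object
}
```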
+ { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleFilter.cpp b/ar_track_alvar/src/SampleFilter.cpp index c575540..fd99d96 100644 --- a/ar_track_alvar/src/SampleFilter.cpp +++ b/ar_track_alvar/src/SampleFilter.cpp @@ -10,246 +10,289 @@ using namespace alvar; using namespace std; -const int res=320; +const int res = 320; -void filter_none(double x, double y, double *fx, double *fy) { - *fx = x; *fy = y; +void filter_none(double x, double y, double* fx, double* fy) +{ + *fx = x; + *fy = y; } -void filter_average(double x, double y, double *fx, double *fy) { - static FilterAverage ax(30); - static FilterAverage ay(30); - *fx = ax.next(x); - *fy = ay.next(y); +void filter_average(double x, double y, double* fx, double* fy) +{ + static FilterAverage ax(30); + static FilterAverage ay(30); + *fx = ax.next(x); + *fy = ay.next(y); } -void filter_median(double x, double y, double *fx, double *fy) { - static FilterMedian ax(30); - static FilterMedian ay(30); - *fx = ax.next(x); - *fy = ay.next(y); +void filter_median(double x, double y, double* fx, double* fy) +{ + static FilterMedian ax(30); + static FilterMedian ay(30); + *fx = ax.next(x); + *fy = ay.next(y); } -void filter_running_average(double x, double y, double *fx, double *fy) { - static FilterRunningAverage ax(0.03); - static FilterRunningAverage ay(0.03); - *fx = ax.next(x); - *fy = ay.next(y); +void filter_running_average(double x, double y, double* fx, double* fy) +{ + static FilterRunningAverage ax(0.03); + static FilterRunningAverage ay(0.03); + *fx = ax.next(x); + *fy = ay.next(y); } -void filter_des(double x, double y, double *fx, double *fy) { - static FilterDoubleExponentialSmoothing ax(0.03,0.01); - static FilterDoubleExponentialSmoothing ay(0.03,0.01); - *fx = ax.next(x); - *fy = ay.next(y); +void filter_des(double x, double y, double* fx, double* fy) +{ + static FilterDoubleExponentialSmoothing ax(0.03, 0.01); + static FilterDoubleExponentialSmoothing ay(0.03, 0.01); + *fx = ax.next(x); + *fy = ay.next(y); } -void filter_kalman(double x, double y, double *fx, double *fy) { - static bool init=true; - static KalmanSensor sensor(4,2); - static Kalman kalman(4); // x, y, dx, dy - if (init) { - init = false; - // H - cvZero(sensor.H); - cvmSet(sensor.H, 0, 0, 1); - cvmSet(sensor.H, 1, 1, 1); - // R - cvSetIdentity(sensor.R, cvScalar(10)); - // F - cvSetIdentity(kalman.F); - cvmSet(kalman.F, 0, 2, 1); - cvmSet(kalman.F, 1, 3, 1); - // Q - cvmSet(kalman.Q, 0, 0, 0.0001); - cvmSet(kalman.Q, 1, 1, 0.0001); - cvmSet(kalman.Q, 2, 2, 0.000001); - cvmSet(kalman.Q, 3, 3, 0.000001); - // P - cvSetIdentity(kalman.P, cvScalar(100)); - } - cvmSet(sensor.z, 0, 0, x); - cvmSet(sensor.z, 1, 0, y); - kalman.predict_update(&sensor, (unsigned long)(cv::getTickCount() / cv::getTickFrequency() * 1000)); - *fx = cvmGet(kalman.x, 0, 0); - *fy = cvmGet(kalman.x, 1, 0); +void filter_kalman(double x, double y, double* fx, double* fy) +{ + static bool init = true; + static KalmanSensor sensor(4, 2); + static Kalman kalman(4); // x, y, dx, dy + if (init) + { + init = false; + // H + cvZero(sensor.H); + cvmSet(sensor.H, 0, 0, 1); + cvmSet(sensor.H, 1, 1, 1); + // R + cvSetIdentity(sensor.R, cvScalar(10)); + // F + cvSetIdentity(kalman.F); + cvmSet(kalman.F, 0, 2, 1); + cvmSet(kalman.F, 1, 3, 1); + // Q + cvmSet(kalman.Q, 0, 0, 0.0001); + cvmSet(kalman.Q, 1, 1, 0.0001); + cvmSet(kalman.Q, 2, 2, 0.000001); + cvmSet(kalman.Q, 3, 3, 0.000001); + // P + cvSetIdentity(kalman.P, cvScalar(100)); + } + cvmSet(sensor.z, 0, 0, 
x); + cvmSet(sensor.z, 1, 0, y); + kalman.predict_update( + &sensor, + (unsigned long)(cv::getTickCount() / cv::getTickFrequency() * 1000)); + *fx = cvmGet(kalman.x, 0, 0); + *fy = cvmGet(kalman.x, 1, 0); } -void filter_array_average(double x, double y, double *fx, double *fy) { - static bool init=true; - static FilterArray fa(2); - if (init) { - init=false; - for (int i=0; i<2; i++) { - fa[i].setWindowSize(30); - } +void filter_array_average(double x, double y, double* fx, double* fy) +{ + static bool init = true; + static FilterArray fa(2); + if (init) + { + init = false; + for (int i = 0; i < 2; i++) + { + fa[i].setWindowSize(30); } - *fx = fa[0].next(x); - *fy = fa[1].next(y); + } + *fx = fa[0].next(x); + *fy = fa[1].next(y); } -class KalmanSensorOwn : public KalmanSensorEkf { - virtual void h(CvMat *x_pred, CvMat *_z_pred) { - double x = cvmGet(x_pred, 0, 0); - double y = cvmGet(x_pred, 1, 0); - double dx = cvmGet(x_pred, 2, 0); - double dy = cvmGet(x_pred, 3, 0); - cvmSet(_z_pred, 0, 0, x); - cvmSet(_z_pred, 1, 0, y); - } +class KalmanSensorOwn : public KalmanSensorEkf +{ + virtual void h(CvMat* x_pred, CvMat* _z_pred) + { + double x = cvmGet(x_pred, 0, 0); + double y = cvmGet(x_pred, 1, 0); + double dx = cvmGet(x_pred, 2, 0); + double dy = cvmGet(x_pred, 3, 0); + cvmSet(_z_pred, 0, 0, x); + cvmSet(_z_pred, 1, 0, y); + } + public: - KalmanSensorOwn(int _n, int _m) : KalmanSensorEkf(_n, _m) {} + KalmanSensorOwn(int _n, int _m) : KalmanSensorEkf(_n, _m) + { + } }; -class KalmanOwn : public KalmanEkf { - virtual void f(CvMat *_x, CvMat *_x_pred, double dt) { - double x = cvmGet(_x, 0, 0); - double y = cvmGet(_x, 1, 0); - double dx = cvmGet(_x, 2, 0); - double dy = cvmGet(_x, 3, 0); - cvmSet(_x_pred, 0, 0, x + dt*dx); - cvmSet(_x_pred, 1, 0, y + dt*dy); - cvmSet(_x_pred, 2, 0, dx); - cvmSet(_x_pred, 3, 0, dy); - } +class KalmanOwn : public KalmanEkf +{ + virtual void f(CvMat* _x, CvMat* _x_pred, double dt) + { + double x = cvmGet(_x, 0, 0); + double y = cvmGet(_x, 1, 0); + double dx = cvmGet(_x, 2, 0); + double dy = cvmGet(_x, 3, 0); + cvmSet(_x_pred, 0, 0, x + dt * dx); + cvmSet(_x_pred, 1, 0, y + dt * dy); + cvmSet(_x_pred, 2, 0, dx); + cvmSet(_x_pred, 3, 0, dy); + } + public: - KalmanOwn(int _n) : KalmanEkf(_n) {} + KalmanOwn(int _n) : KalmanEkf(_n) + { + } }; -void filter_ekf(double x, double y, double *fx, double *fy) { - static bool init=true; - static KalmanSensorOwn sensor(4,2); - static KalmanOwn kalman(4); // x, y, dx, dy - if (init) { - init = false; - // R - cvSetIdentity(sensor.R, cvScalar(100)); - // Q - cvmSet(kalman.Q, 0, 0, 0.001); - cvmSet(kalman.Q, 1, 1, 0.001); - cvmSet(kalman.Q, 2, 2, 0.01); - cvmSet(kalman.Q, 3, 3, 0.01); - // P - cvSetIdentity(kalman.P, cvScalar(100)); - } - cvmSet(sensor.z, 0, 0, x); - cvmSet(sensor.z, 1, 0, y); - kalman.predict_update(&sensor, (unsigned long)(cv::getTickCount() / cv::getTickFrequency() * 1000)); - *fx = cvmGet(kalman.x, 0, 0); - *fy = cvmGet(kalman.x, 1, 0); +void filter_ekf(double x, double y, double* fx, double* fy) +{ + static bool init = true; + static KalmanSensorOwn sensor(4, 2); + static KalmanOwn kalman(4); // x, y, dx, dy + if (init) + { + init = false; + // R + cvSetIdentity(sensor.R, cvScalar(100)); + // Q + cvmSet(kalman.Q, 0, 0, 0.001); + cvmSet(kalman.Q, 1, 1, 0.001); + cvmSet(kalman.Q, 2, 2, 0.01); + cvmSet(kalman.Q, 3, 3, 0.01); + // P + cvSetIdentity(kalman.P, cvScalar(100)); + } + cvmSet(sensor.z, 0, 0, x); + cvmSet(sensor.z, 1, 0, y); + kalman.predict_update( + &sensor, + (unsigned long)(cv::getTickCount() / 
cv::getTickFrequency() * 1000)); + *fx = cvmGet(kalman.x, 0, 0); + *fy = cvmGet(kalman.x, 1, 0); } -//Make list of filters +// Make list of filters const int nof_filters = 8; -void (*(filters[nof_filters]))(double x, double y, double *fx, double *fy) = { - filter_none, - filter_average, - filter_median, - filter_running_average, - filter_des, - filter_kalman, - filter_ekf, - filter_array_average, -}; -char filter_names[nof_filters][64]={ - "No filter - Press any key to change", - "Average", - "Median", - "Running Average", - "Double Exponential Smoothing", - "Kalman", - "Extended Kalman", - "Array (average)" +void (*(filters[nof_filters]))(double x, double y, double* fx, double* fy) = { + filter_none, filter_average, filter_median, filter_running_average, + filter_des, filter_kalman, filter_ekf, filter_array_average, }; +char filter_names[nof_filters][64] = { "No filter - Press any key to change", + "Average", + "Median", + "Running Average", + "Double Exponential Smoothing", + "Kalman", + "Extended Kalman", + "Array (average)" }; // Just generate some random data that can be used as sensor input -void get_measurement(double *x, double *y) { - static double xx=0; - static double yy=0; - static double dxx = 0.3; - static double dyy = 0.7; - xx += dxx; yy += dyy; - if ((xx > res) || (xx < 0)) dxx = -dxx; - if ((yy > res) || (yy < 0)) dyy = -dyy; - double rx = (rand()*20.0/RAND_MAX)-10.0; - double ry = (rand()*20.0/RAND_MAX)-10.0; - - // Add some outliers - if(fabs(rx*ry)>50) - { - rx *= 5; - ry *= 5; - } +void get_measurement(double* x, double* y) +{ + static double xx = 0; + static double yy = 0; + static double dxx = 0.3; + static double dyy = 0.7; + xx += dxx; + yy += dyy; + if ((xx > res) || (xx < 0)) + dxx = -dxx; + if ((yy > res) || (yy < 0)) + dyy = -dyy; + double rx = (rand() * 20.0 / RAND_MAX) - 10.0; + double ry = (rand() * 20.0 / RAND_MAX) - 10.0; + + // Add some outliers + if (fabs(rx * ry) > 50) + { + rx *= 5; + ry *= 5; + } - *x = xx + rx; *y = yy + ry; + *x = xx + rx; + *y = yy + ry; } -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SampleFilter" << std::endl; - std::cout << "============" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This is an example of how to use the 'FilterAverage', 'FilterMedian'," << std::endl; - std::cout << " 'FilterRunningAverage', 'FilterDoubleExponentialSmoothing', 'Kalman'" << std::endl; - std::cout << " 'KalmanEkf' and 'FilterArray' filtering classes. First the example" << std::endl; - std::cout << " shows unfiltered test data with outliers. The data is then filtered" << std::endl; - std::cout << " using the various filters. Press any key to cycle through the filters." 
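The `cvmSet` block in `filter_kalman` above encodes a constant-velocity model: the state is (x, y, dx, dy), only (x, y) is measured, `F` adds each velocity to its position once per step, and `Q` trusts the velocities far more than the positions. For comparison, a sketch of the same model on OpenCV's `cv::KalmanFilter` (our assumption for illustration; the patch itself stays with the legacy `Kalman`/`KalmanSensor` classes, whose `predict_update` additionally takes the millisecond timestamp shown above):

```
#include <opencv2/video/tracking.hpp>

// Same 4-state constant-velocity model as filter_kalman, modern API (sketch).
void filter_kalman_cv(double x, double y, double* fx, double* fy)
{
  static cv::KalmanFilter kf = [] {
    cv::KalmanFilter k(4, 2);  // state: x, y, dx, dy; measurement: x, y
    k.transitionMatrix = (cv::Mat_<float>(4, 4) <<  // F: x += dx, y += dy
        1, 0, 1, 0,
        0, 1, 0, 1,
        0, 0, 1, 0,
        0, 0, 0, 1);
    cv::setIdentity(k.measurementMatrix);                         // H picks x, y
    cv::setIdentity(k.measurementNoiseCov, cv::Scalar::all(10));  // R
    cv::setIdentity(k.processNoiseCov, cv::Scalar::all(1e-4));    // Q, position
    k.processNoiseCov.at<float>(2, 2) = 1e-6;                     // Q, velocity
    k.processNoiseCov.at<float>(3, 3) = 1e-6;
    cv::setIdentity(k.errorCovPost, cv::Scalar::all(100));        // P
    return k;
  }();
  kf.predict();
  cv::Mat est = kf.correct((cv::Mat_<float>(2, 1) << float(x), float(y)));
  *fx = est.at<float>(0);
  *fy = est.at<float>(1);
}
```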
<< std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " any key: cycle through filters" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; - - // Processing loop - IplImage *img = cvCreateImage(cvSize(res, res), IPL_DEPTH_8U, 3); - cvNamedWindow("SampleFilter"); - CvFont font; - cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0); - for (int ii = 0; ii < nof_filters; ii++) { - int key = 0; - double x, y; - double fx, fy; - vector tail; - while (1) { - get_measurement(&x, &y); - filters[ii](x, y, &fx, &fy); - cvZero(img); - cvPutText(img, filter_names[ii], cvPoint(3, res - 10), &font, CV_RGB(255, 255, 255)); - cvCircle(img, cvPoint(int(x), int(y)), 2, CV_RGB(0, 255, 255)); - cvCircle(img, cvPoint(int(x), int(y)), 3, CV_RGB(255, 255, 255)); - CvPoint fp; - fp.x = int(fx); - fp.y = int(fy); - tail.push_back(fp); - for (size_t iii = 0; iii < tail.size(); iii++) { - cvCircle(img, tail[iii], 0, CV_RGB(255, 255, 0)); - } - cvCircle(img, fp, 2, CV_RGB(255, 0, 255)); - cvShowImage("SampleFilter", img); - key = cvWaitKey(10); - if (key != -1) { - break; - } - } - if (key == 'q') { - break; - } + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleFilter" << std::endl; + std::cout << "============" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to use the 'FilterAverage', " + "'FilterMedian'," + << std::endl; + std::cout << " 'FilterRunningAverage', " + "'FilterDoubleExponentialSmoothing', 'Kalman'" + << std::endl; + std::cout << " 'KalmanEkf' and 'FilterArray' filtering classes. First the " + "example" + << std::endl; + std::cout << " shows unfiltered test data with outliers. The data is then " + "filtered" + << std::endl; + std::cout << " using the various filters. Press any key to cycle through " + "the filters." + << std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << std::endl; + std::cout << std::endl; + std::cout << "Keyboard Shortcuts:" << std::endl; + std::cout << " any key: cycle through filters" << std::endl; + std::cout << " q: quit" << std::endl; + std::cout << std::endl; + + // Processing loop + IplImage* img = cvCreateImage(cvSize(res, res), IPL_DEPTH_8U, 3); + cvNamedWindow("SampleFilter"); + CvFont font; + cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0); + for (int ii = 0; ii < nof_filters; ii++) + { + int key = 0; + double x, y; + double fx, fy; + vector tail; + while (1) + { + get_measurement(&x, &y); + filters[ii](x, y, &fx, &fy); + cvZero(img); + cvPutText(img, filter_names[ii], cvPoint(3, res - 10), &font, + CV_RGB(255, 255, 255)); + cvCircle(img, cvPoint(int(x), int(y)), 2, CV_RGB(0, 255, 255)); + cvCircle(img, cvPoint(int(x), int(y)), 3, CV_RGB(255, 255, 255)); + CvPoint fp; + fp.x = int(fx); + fp.y = int(fy); + tail.push_back(fp); + for (size_t iii = 0; iii < tail.size(); iii++) + { + cvCircle(img, tail[iii], 0, CV_RGB(255, 255, 0)); } - cvReleaseImage(&img); - return 0; - } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; - } - catch (...) 
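`FilterDoubleExponentialSmoothing(0.03, 0.01)` above takes two constants because double exponential smoothing (Holt's method) tracks both a level and a trend. A self-contained sketch of the recurrence as we read it (the ALVAR class hides the same two constants behind `next()`, so this is an interpretation, not its verbatim implementation):

```
// Holt double exponential smoothing: level s, trend b.
struct Des
{
  double alpha, gamma;  // level and trend smoothing constants, e.g. {0.03, 0.01}
  double s = 0, b = 0;
  bool first = true;
  double next(double x)
  {
    if (first) { s = x; first = false; return s; }
    double s_prev = s;
    s = alpha * x + (1 - alpha) * (s + b);       // smooth the level
    b = gamma * (s - s_prev) + (1 - gamma) * b;  // smooth the trend
    return s;
  }
};
```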
{ - std::cout << "Exception: unknown" << std::endl; + cvCircle(img, fp, 2, CV_RGB(255, 0, 255)); + cvShowImage("SampleFilter", img); + key = cvWaitKey(10); + if (key != -1) + { + break; + } + } + if (key == 'q') + { + break; + } } + cvReleaseImage(&img); + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) + { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleIntegralImage.cpp b/ar_track_alvar/src/SampleIntegralImage.cpp index 2a88625..ec8b2fd 100644 --- a/ar_track_alvar/src/SampleIntegralImage.cpp +++ b/ar_track_alvar/src/SampleIntegralImage.cpp @@ -4,207 +4,257 @@ using namespace alvar; using namespace std; -void videocallback(IplImage *image) +void videocallback(IplImage* image) { - static IplImage *img_grad = NULL; - static IplImage *img_gray = NULL; - static IplImage *img_ver = NULL; - static IplImage *img_hor = NULL; - static IplImage *img_canny = NULL; - static IntegralImage integ; - static IntegralGradient grad; - - assert(image); - if (img_gray == NULL) { - // Following image is toggled visible using key '0' - img_grad = CvTestbed::Instance().CreateImageWithProto("Gradient", image); - CvTestbed::Instance().ToggleImageVisible(0); - img_gray = CvTestbed::Instance().CreateImageWithProto("Grayscale", image, 0, 1); - img_ver = CvTestbed::Instance().CreateImage("Vertical", cvSize(1,image->height), IPL_DEPTH_8U, 1); - img_hor = CvTestbed::Instance().CreateImage("Horizontal", cvSize(image->width,1), IPL_DEPTH_8U, 1); - img_canny = CvTestbed::Instance().CreateImageWithProto("Canny", image, 0, 1); - img_canny->origin = img_ver->origin = img_hor->origin = image->origin; - } - if (image->nChannels > 1) { - cvCvtColor(image, img_gray, CV_RGB2GRAY); - } else { - cvCopy(image, img_gray); + static IplImage* img_grad = NULL; + static IplImage* img_gray = NULL; + static IplImage* img_ver = NULL; + static IplImage* img_hor = NULL; + static IplImage* img_canny = NULL; + static IntegralImage integ; + static IntegralGradient grad; + + assert(image); + if (img_gray == NULL) + { + // Following image is toggled visible using key '0' + img_grad = CvTestbed::Instance().CreateImageWithProto("Gradient", image); + CvTestbed::Instance().ToggleImageVisible(0); + img_gray = + CvTestbed::Instance().CreateImageWithProto("Grayscale", image, 0, 1); + img_ver = CvTestbed::Instance().CreateImage( + "Vertical", cvSize(1, image->height), IPL_DEPTH_8U, 1); + img_hor = CvTestbed::Instance().CreateImage( + "Horizontal", cvSize(image->width, 1), IPL_DEPTH_8U, 1); + img_canny = + CvTestbed::Instance().CreateImageWithProto("Canny", image, 0, 1); + img_canny->origin = img_ver->origin = img_hor->origin = image->origin; + } + if (image->nChannels > 1) + { + cvCvtColor(image, img_gray, CV_RGB2GRAY); + } + else + { + cvCopy(image, img_gray); + } + + // Show PerformanceTimer + // PerformanceTimer timer; + // timer.Start(); + + // Update the integral images + integ.Update(img_gray); + grad.Update(img_gray); + + // Whole image projections + integ.GetSubimage(cvRect(0, 0, image->width, image->height), img_ver); + integ.GetSubimage(cvRect(0, 0, image->width, image->height), img_hor); + for (int y = 1; y < image->height; y++) + { + cvLine(image, cvPoint(int(cvGet2D(img_ver, y - 1, 0).val[0]), y - 1), + cvPoint(int(cvGet2D(img_ver, y, 0).val[0]), y), CV_RGB(255, 0, 0)); + } + for (int x = 1; x < image->width; x++) + { + cvLine(image, cvPoint(x - 1, int(cvGet2D(img_hor, 0, x - 1).val[0])), + cvPoint(x, int(cvGet2D(img_hor, 0, 
x).val[0])), CV_RGB(0, 255, 0)); + } + + // Gradients + // Mark gradients for 4x4 sub-blocks + /* + cvZero(img_grad); + CvRect r = {0,0,4,4}; + for (int y=0; yheight/4; y++) { + r.y = y*4; + for (int x=0; xwidth/4; x++) { + r.x = x*4; + double dirx, diry; + grad.GetAveGradient(r, &dirx, &diry); + cvLine(img_grad, cvPoint(r.x+2,r.y+2), + cvPoint(r.x+2+int(dirx),r.y+2+int(diry)), CV_RGB(255,0,0)); + } + } + */ + + // Gradients on canny + cvZero(img_grad); + static int t1 = 64, t2 = 192; + cvCreateTrackbar("t1", "Gradient", &t1, 255, NULL); + cvCreateTrackbar("t2", "Gradient", &t2, 255, NULL); + cvCanny(img_gray, img_canny, t1, t2); + CvRect r = { 0, 0, 4, 4 }; + for (r.y = 0; r.y < img_canny->height - 4; r.y++) + { + for (r.x = 0; r.x < img_canny->width - 4; r.x++) + { + if (img_canny->imageData[r.y * img_canny->widthStep + r.x]) + { + double dirx, diry; + grad.GetAveGradient(r, &dirx, &diry); + cvLine(img_grad, cvPoint(r.x + 2, r.y + 2), + cvPoint(r.x + 2 + int(dirx), r.y + 2 + int(diry)), + CV_RGB(0, 0, 255)); + cvLine(img_grad, cvPoint(r.x + 2, r.y + 2), + cvPoint(r.x + 2 + int(-diry), r.y + 2 + int(+dirx)), + CV_RGB(255, 0, 0)); + cvLine(img_grad, cvPoint(r.x + 2, r.y + 2), + cvPoint(r.x + 2 + int(+diry), r.y + 2 + int(-dirx)), + CV_RGB(255, 0, 0)); + } } + } + + // Show PerformanceTimer + // cout<<"Processing: "<<1.0 / timer.Stop()<<" fps"<width,image->height), img_ver); - integ.GetSubimage(cvRect(0,0,image->width,image->height), img_hor); - for (int y=1; yheight; y++) { - cvLine(image, - cvPoint(int(cvGet2D(img_ver, y-1, 0).val[0]), y-1), - cvPoint(int(cvGet2D(img_ver, y, 0).val[0]), y), - CV_RGB(255,0,0)); + // Enumerate possible capture plugins + CaptureFactory::CapturePluginVector plugins = + CaptureFactory::instance()->enumeratePlugins(); + if (plugins.size() < 1) + { + std::cout << "Could not find any capture plugins." << std::endl; + return 0; } - for (int x=1; xwidth; x++) { - cvLine(image, - cvPoint(x-1, int(cvGet2D(img_hor, 0, x-1).val[0])), - cvPoint(x, int(cvGet2D(img_hor, 0, x).val[0])), - CV_RGB(0,255,0)); + + // Display capture plugins + std::cout << "Available Plugins: "; + outputEnumeratedPlugins(plugins); + std::cout << std::endl; + + // Enumerate possible capture devices + CaptureFactory::CaptureDeviceVector devices = + CaptureFactory::instance()->enumerateDevices(); + if (devices.size() < 1) + { + std::cout << "Could not find any capture devices." 
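The `IntegralImage`/`IntegralGradient` machinery above makes the per-row and per-column projections cheap because a summed-area table turns any rectangle sum into four lookups. The same identity with stock OpenCV, as a hedged equivalent rather than the ALVAR classes used in this sample:

```
#include <opencv2/imgproc.hpp>

// Mean intensity over rect (x, y, w, h) of a grayscale image via a
// summed-area table; cv::integral yields a (rows+1) x (cols+1) table.
double boxMean(const cv::Mat& gray, int x, int y, int w, int h)
{
  cv::Mat sum;
  cv::integral(gray, sum, CV_64F);
  double s = sum.at<double>(y + h, x + w) - sum.at<double>(y, x + w)
           - sum.at<double>(y + h, x) + sum.at<double>(y, x);
  return s / double(w * h);
}
```

In real use the table would be built once per frame and queried many times; rebuilding it per query, as this sketch does, gives away the speedup.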
<< std::endl; + return 0; } - // Gradients - // Mark gradients for 4x4 sub-blocks - /* - cvZero(img_grad); - CvRect r = {0,0,4,4}; - for (int y=0; yheight/4; y++) { - r.y = y*4; - for (int x=0; xwidth/4; x++) { - r.x = x*4; - double dirx, diry; - grad.GetAveGradient(r, &dirx, &diry); - cvLine(img_grad, cvPoint(r.x+2,r.y+2), cvPoint(r.x+2+int(dirx),r.y+2+int(diry)), CV_RGB(255,0,0)); - } + // Check command line argument for which device to use + int selectedDevice = defaultDevice(devices); + if (argc > 1) + { + selectedDevice = atoi(argv[1]); } - */ - - // Gradients on canny - cvZero(img_grad); - static int t1=64, t2=192; - cvCreateTrackbar("t1", "Gradient", &t1, 255, NULL); - cvCreateTrackbar("t2", "Gradient", &t2, 255, NULL); - cvCanny(img_gray, img_canny, t1, t2); - CvRect r = {0,0,4,4}; - for (r.y=0; r.yheight-4; r.y++) { - for (r.x=0; r.xwidth-4; r.x++) { - if (img_canny->imageData[r.y*img_canny->widthStep+r.x]) { - double dirx, diry; - grad.GetAveGradient(r, &dirx, &diry); - cvLine(img_grad, cvPoint(r.x+2,r.y+2), cvPoint(r.x+2+int(dirx),r.y+2+int(diry)), CV_RGB(0,0,255)); - cvLine(img_grad, cvPoint(r.x+2,r.y+2), cvPoint(r.x+2+int(-diry),r.y+2+int(+dirx)), CV_RGB(255,0,0)); - cvLine(img_grad, cvPoint(r.x+2,r.y+2), cvPoint(r.x+2+int(+diry),r.y+2+int(-dirx)), CV_RGB(255,0,0)); - } - } + if (selectedDevice >= (int)devices.size()) + { + selectedDevice = defaultDevice(devices); } - // Show PerformanceTimer - //cout<<"Processing: "<<1.0 / timer.Stop()<<" fps"<enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." << std::endl; - return 0; - } - - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; - - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." << std::endl; - return 0; - } - - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleIntegralImage (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." 
<< std::endl; - } - - return 0; + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); + + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + + cap->start(); + cap->setResolution(640, 480); + + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleIntegralImage (" << cap->captureDevice().captureType() + << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) + { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleLabeling.cpp b/ar_track_alvar/src/SampleLabeling.cpp index 9af6eba..3f1e0ad 100644 --- a/ar_track_alvar/src/SampleLabeling.cpp +++ b/ar_track_alvar/src/SampleLabeling.cpp @@ -6,168 +6,200 @@ using namespace std; int thresh_param1 = 31; -void videocallback(IplImage *image) +void videocallback(IplImage* image) { - bool flip_image = (image->origin?true:false); - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; + bool flip_image = (image->origin ? true : false); + if (flip_image) + { + cvFlip(image); + image->origin = !image->origin; + } + + static LabelingCvSeq* labeling = 0; + if (!labeling) + { + labeling = new LabelingCvSeq(); + } + + labeling->SetThreshParams(thresh_param1, 5); + + const int min_edge_size = 10; + CvSeq* edges = labeling->LabelImage(image, min_edge_size); + + int n_edges = edges->total; + for (int i = 0; i < n_edges; ++i) + { + CvSeq* pixels = (CvSeq*)cvGetSeqElem(edges, i); + int n_pixels = pixels->total; + for (int j = 0; j < n_pixels; ++j) + { + CvPoint* pt = (CvPoint*)cvGetSeqElem(pixels, j); + cvLine(image, *pt, *pt, CV_RGB(255, 0, 0)); } + } - static LabelingCvSeq* labeling = 0; - if(!labeling) { - labeling = new LabelingCvSeq(); - } + // Visualize now also the square corners. 
+ labeling->LabelSquares(image, true); - labeling->SetThreshParams(thresh_param1, 5); + if (flip_image) + { + cvFlip(image); + image->origin = !image->origin; + } +} - const int min_edge_size = 10; - CvSeq* edges = labeling->LabelImage(image, min_edge_size); +int keycallback(int key) +{ + if (key == '+') + { + thresh_param1 += 2; + std::cout << "Adaptive threshold block size: " << thresh_param1 + << std::endl; + } + else if (key == '-') + { + thresh_param1 -= 2; + if (thresh_param1 < 3) + thresh_param1 = 3; + std::cout << "Adaptive threshold block size: " << thresh_param1 + << std::endl; + } + + else + return key; + + return 0; +} - int n_edges = edges->total; - for(int i = 0; i < n_edges; ++i) +int main(int argc, char* argv[]) +{ + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleLabeling" << std::endl; + std::cout << "==============" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to use the 'LabelingCvSeq' class " + "to perform" + << std::endl; + std::cout << " labeling. Blobs are detected in the image and if the blobs " + "have four" + << std::endl; + std::cout << " corners, the edges between the corners are visualized." + << std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << " [device]" << std::endl; + std::cout << std::endl; + std::cout << " device integer selecting device from enumeration list " + "(default 0)" + << std::endl; + std::cout << " highgui capture devices are prefered" + << std::endl; + std::cout << std::endl; + std::cout << "Keyboard Shortcuts:" << std::endl; + std::cout << " +: Increase adaptive threshold block size." << std::endl; + std::cout << " -: Decrease adaptive threshold block size." << std::endl; + std::cout << " q: quit" << std::endl; + std::cout << std::endl; + + // Initialise CvTestbed + CvTestbed::Instance().SetVideoCallback(videocallback); + CvTestbed::Instance().SetKeyCallback(keycallback); + + // Enumerate possible capture plugins + CaptureFactory::CapturePluginVector plugins = + CaptureFactory::instance()->enumeratePlugins(); + if (plugins.size() < 1) { - CvSeq* pixels = (CvSeq*)cvGetSeqElem(edges, i); - int n_pixels = pixels->total; - for(int j = 0; j < n_pixels; ++j) - { - CvPoint* pt = (CvPoint*)cvGetSeqElem(pixels, j); - cvLine(image, *pt, *pt, CV_RGB(255,0,0)); - } + std::cout << "Could not find any capture plugins." << std::endl; + return 0; } - // Visualize now also the square corners. - labeling->LabelSquares(image, true); + // Display capture plugins + std::cout << "Available Plugins: "; + outputEnumeratedPlugins(plugins); + std::cout << std::endl; - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; + // Enumerate possible capture devices + CaptureFactory::CaptureDeviceVector devices = + CaptureFactory::instance()->enumerateDevices(); + if (devices.size() < 1) + { + std::cout << "Could not find any capture devices." 
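The `+`/`-` handlers above step `thresh_param1` by 2 and clamp it at 3 because adaptive-threshold block sizes must be odd and at least 3. Assuming `LabelingCvSeq::SetThreshParams(a, b)` maps onto the usual block-size and offset pair (our reading, the patch does not document it), the thresholding underneath is equivalent to:

```
// blockSize must be odd and >= 3, hence the +/- 2 stepping above; the
// constant 5 mirrors the second argument of SetThreshParams(thresh_param1, 5).
cv::adaptiveThreshold(gray, bw, 255, cv::ADAPTIVE_THRESH_MEAN_C,
                      cv::THRESH_BINARY, thresh_param1, 5);
```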
<< std::endl; + return 0; } -} -int keycallback(int key) -{ - if(key == '+') + // Check command line argument for which device to use + int selectedDevice = defaultDevice(devices); + if (argc > 1) { - thresh_param1+=2; - std::cout<<"Adaptive threshold block size: "<= (int)devices.size()) { - thresh_param1-=2; - if(thresh_param1<3) thresh_param1 = 3; - std::cout<<"Adaptive threshold block size: "<createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); -int main(int argc, char *argv[]) -{ - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SampleLabeling" << std::endl; - std::cout << "==============" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This is an example of how to use the 'LabelingCvSeq' class to perform" << std::endl; - std::cout << " labeling. Blobs are detected in the image and if the blobs have four" << std::endl; - std::cout << " corners, the edges between the corners are visualized." << std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " [device]" << std::endl; - std::cout << std::endl; - std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl; - std::cout << " highgui capture devices are prefered" << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " +: Increase adaptive threshold block size." << std::endl; - std::cout << " -: Decrease adaptive threshold block size." << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; - - // Initialise CvTestbed - CvTestbed::Instance().SetVideoCallback(videocallback); - CvTestbed::Instance().SetKeyCallback(keycallback); - - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." << std::endl; - return 0; - } - - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; - - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." 
<< std::endl; - return 0; - } - - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleLabeling (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." << std::endl; - } - - return 0; + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + + cap->start(); + cap->setResolution(640, 480); + + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleLabeling (" << cap->captureDevice().captureType() << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) 
+ { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleMarkerCreator.cpp b/ar_track_alvar/src/SampleMarkerCreator.cpp index c0a8f04..7d99aac 100644 --- a/ar_track_alvar/src/SampleMarkerCreator.cpp +++ b/ar_track_alvar/src/SampleMarkerCreator.cpp @@ -1,298 +1,419 @@ #include "ar_track_alvar/MultiMarker.h" -#include "highgui.h" +#include using namespace std; using namespace alvar; -struct State { - IplImage *img; - stringstream filename; - double minx, miny, maxx, maxy; // top-left and bottom-right in pixel units - MultiMarker multi_marker; +struct State +{ + cv::Mat img; + stringstream filename; + double minx, miny, maxx, maxy; // top-left and bottom-right in pixel units + MultiMarker multi_marker; - // General options - bool prompt; - double units; // how many pixels per one unit - double marker_side_len; // marker side len in current units - int marker_type; // 0:MarkerData, 1:ArToolkit - double posx, posy; // The position of marker center in the given units - int content_res; - double margin_res; - bool array; + // General options + bool prompt; + double units; // how many pixels per one unit + double marker_side_len; // marker side len in current units + int marker_type; // 0:MarkerData, 1:ArToolkit + double posx, posy; // The position of marker center in the given units + int content_res; + double margin_res; + bool array; - // MarkerData specific options - MarkerData::MarkerContentType marker_data_content_type; - bool marker_data_force_strong_hamming; + // MarkerData specific options + MarkerData::MarkerContentType marker_data_content_type; + bool marker_data_force_strong_hamming; - State() - : img(0), - prompt(false), - units(96.0/2.54), // cm assuming 96 dpi - marker_side_len(9.0), // 9 cm - marker_type(0), - posx(0), posy(0), - content_res(0), // 0 uses default - margin_res(0.0), // 0.0 uses default (can be n*0.5) - marker_data_content_type(MarkerData::MARKER_CONTENT_TYPE_NUMBER), - marker_data_force_strong_hamming(false) - {} - ~State() { - if (img) cvReleaseImage(&img); - } - void AddMarker(const char *id) { - std::cout<<"ADDING MARKER "< new_maxx) new_maxx = maxx; - if (maxy > new_maxy) new_maxy = maxy; - IplImage *new_img = cvCreateImage(cvSize(int(new_maxx-new_minx+0.5), int(new_maxy-new_miny+0.5)), IPL_DEPTH_8U, 1); - cvSet(new_img, cvScalar(255)); - CvRect roi = cvRect(int(minx-new_minx+0.5), int(miny-new_miny+0.5), img->width, img->height); - cvSetImageROI(new_img, roi); - cvCopy(img, new_img); - cvReleaseImage(&img); - img = new_img; - roi.x = int((posx*units) - (marker_side_len*units/2.0) - new_minx + 0.5); - roi.y = int((posy*units) - (marker_side_len*units/2.0) - new_miny + 0.5); - roi.width = int(marker_side_len*units+0.5); roi.height = int(marker_side_len*units+0.5); - cvSetImageROI(img, roi); - minx = new_minx; miny = new_miny; - maxx = new_maxx; maxy = new_maxy; - } - if (marker_data_content_type == MarkerData::MARKER_CONTENT_TYPE_NUMBER) { - int idi = atoi(id); - md.SetContent(marker_data_content_type, idi, 0); - if (filename.str().length()<64) filename<<"_"< new_maxx) + new_maxx = maxx; + if (maxy > new_maxy) + new_maxy = maxy; + cv::Mat new_img = cv::Mat(cv::Size(int(new_maxx - new_minx + 0.5), + int(new_maxy - new_miny + 0.5)), + CV_8UC1, cv::Scalar(255)); + img.copyTo( + new_img(cv::Rect(int(minx - new_minx + 0.5), + int(miny - new_miny + 0.5), img.cols, img.rows))); + img.release(); + img = new_img; + int roi_x = int((posx * units) - (marker_side_len * units / 2.0) - + new_minx + 0.5); + int roi_y = int((posy * units) 
- (marker_side_len * units / 2.0) - + new_miny + 0.5); + int roi_t = -roi_y; + int roi_b = -img.rows + int(marker_side_len * units + 0.5) + roi_y; + int roi_l = -roi_x; + int roi_r = -img.cols + int(marker_side_len * units + 0.5) + roi_x; + img.adjustROI(roi_t, roi_b, roi_l, roi_r); + minx = new_minx; + miny = new_miny; + maxx = new_maxx; + maxy = new_maxy; + } + if (marker_data_content_type == MarkerData::MARKER_CONTENT_TYPE_NUMBER) + { + int idi = atoi(id); + md.SetContent(marker_data_content_type, idi, 0); + if (filename.str().length() < 64) + filename << "_" << idi; - Pose pose; - pose.Reset(); - pose.SetTranslation(posx, -posy, 0); - multi_marker.PointCloudAdd(idi, marker_side_len, pose); - } else { - md.SetContent(marker_data_content_type, 0, id); - const char *p = id; - int counter=0; - filename<<"_"; - while(*p) { - if (!isalnum(*p)) filename<<"_"; - else filename<<(char)tolower(*p); - p++; counter++; - if (counter > 8) break; - } - } - md.ScaleMarkerToImage(img); - cvResetImageROI(img); - } - else if (marker_type == 1) { - // Create and save MarkerArtoolkit marker (Note, that this doesn't support multi markers) - MarkerArtoolkit md(marker_side_len, content_res, margin_res); - int side_len = int(marker_side_len*units+0.5); - if (img != 0) cvReleaseImage(&img); - img = cvCreateImage(cvSize(side_len, side_len), IPL_DEPTH_8U, 1); - filename.str(""); - filename<<"MarkerArtoolkit"; - md.SetContent(atoi(id)); - filename<<"_"< 8) + break; } + } + md.ScaleMarkerToImage(img); + // reset ROI + cv::Size roi_size; + cv::Point roi_offset; + img.locateROI(roi_size, roi_offset); + img.adjustROI(roi_offset.y, roi_size.height - img.rows, roi_offset.x, + roi_size.width - img.cols); } - void Save() { - if (img) { - std::stringstream filenamexml; - filenamexml< 1) { - std::cout<<"Saving: "< 1) + { + std::cout << "Saving: " << filenamexml.str() << std::endl; + multi_marker.Save(filenamexml.str().c_str(), alvar::FILE_FORMAT_XML); + } } + } } st; -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { - try { - if (argc < 2) st.prompt = true; - for (int i=1; i" << std::endl; - std::cout << " -s 5.0 use marker size 5.0x5.0 units (default 9.0x9.0)" << std::endl; - std::cout << " -r 5 marker content resolution -- 0 uses default" << std::endl; - std::cout << " -m 2.0 marker margin resolution -- 0 uses default" << std::endl; - std::cout << " -a use ArToolkit style matrix markers" << std::endl; - std::cout << " -p prompt marker placements interactively from the user" << std::endl; - std::cout << std::endl; + // Output usage message + if (st.prompt) + { + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleMarkerCreator" << std::endl; + std::cout << "===================" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to use the 'MarkerData' and " + "'MarkerArtoolkit'" + << std::endl; + std::cout << " classes to generate marker images. This application can " + "be used to" + << std::endl; + std::cout << " generate markers and multimarker setups that can be used " + "with" + << std::endl; + std::cout << " SampleMarkerDetector and SampleMultiMarker." 
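The `adjustROI` arithmetic above replaces the removed `cvSetImageROI` call. `cv::Mat::adjustROI(dtop, dbottom, dleft, dright)` grows the current view by each margin, and negative values shrink it, so the four deltas computed from `roi_x`/`roi_y` select exactly the marker's square inside the enlarged canvas. The same view can be written more directly as:

```
// Equivalent to the adjustROI(roi_t, roi_b, roi_l, roi_r) call above: an
// roi_side x roi_side view of img with its top-left corner at (roi_x, roi_y).
int roi_side = int(marker_side_len * units + 0.5);
cv::Mat view = img(cv::Rect(roi_x, roi_y, roi_side, roi_side));
```

The patch mutates `img`'s own ROI instead because `ScaleMarkerToImage` is then called on `img` itself; note also the `-posy` sign flip when the marker is added to the point cloud, presumably because image rows grow downwards while the marker's y axis points up.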
<< std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << " [options] argument" << std::endl; + std::cout << std::endl; + std::cout << " 65535 marker with number 65535" + << std::endl; + std::cout << " -f 65535 force hamming(8,4) encoding" + << std::endl; + std::cout << " -1 \"hello world\" marker with string" << std::endl; + std::cout << " -2 catalog.xml marker with file reference" + << std::endl; + std::cout << " -3 www.vtt.fi marker with URL" << std::endl; + std::cout << " -u 96 use units corresponding to 1.0 unit " + "per 96 pixels" + << std::endl; + std::cout << " -uin use inches as units (assuming 96 dpi)" + << std::endl; + std::cout << " -ucm use cm's as units (assuming 96 dpi) " + "" + << std::endl; + std::cout << " -s 5.0 use marker size 5.0x5.0 units " + "(default 9.0x9.0)" + << std::endl; + std::cout << " -r 5 marker content resolution -- 0 uses " + "default" + << std::endl; + std::cout << " -m 2.0 marker margin resolution -- 0 uses " + "default" + << std::endl; + std::cout << " -a use ArToolkit style matrix markers" + << std::endl; + std::cout << " -p prompt marker placements " + "interactively from the user" + << std::endl; + std::cout << std::endl; - // Interactive stuff here - if (st.prompt) { - st.marker_type = 0; - st.marker_data_content_type = MarkerData::MARKER_CONTENT_TYPE_NUMBER; - std::cout<<"\nPrompt marker placements interactively"< 0) marker_id=atoi(s.c_str()); - if (marker_id < 0) break; - std::cout<<" x position (in current units) ["< 0) posx=atof(s.c_str()); - std::cout<<" y position (in current units) ["< 0) posy=atof(s.c_str()); - st.posx=posx; st.posy=posy; - std::stringstream ss; - ss< 0) + marker_id = atoi(s.c_str()); + if (marker_id < 0) + break; + std::cout << " x position (in current units) [" << posx << "]: "; + std::flush(std::cout); + std::getline(std::cin, s); + if (s.length() > 0) + posx = atof(s.c_str()); + std::cout << " y position (in current units) [" << posy << "]: "; + std::flush(std::cout); + std::getline(std::cin, s); + if (s.length() > 0) + posy = atof(s.c_str()); + st.posx = posx; + st.posy = posy; + std::stringstream ss; + ss << marker_id; + st.AddMarker(ss.str().c_str()); - // Guess suitable marker_id and placement for the next marker - marker_id++; - if (posx <= 0) { - posx = int(sqrt(double(marker_id)))*st.marker_side_len*2; - posy = 0; - vert = true; - } else if (vert) { - posy += (st.marker_side_len*2); - if (posy >= posx) vert = false; - } else { - posx -= (st.marker_side_len*2); - } - } - } + // Guess suitable marker_id and placement for the next marker + marker_id++; + if (posx <= 0) + { + posx = int(sqrt(double(marker_id))) * st.marker_side_len * 2; + posy = 0; + vert = true; + } + else if (vert) + { + posy += (st.marker_side_len * 2); + if (posy >= posx) + vert = false; + } + else + { + posx -= (st.marker_side_len * 2); + } } - else if(st.array) { - st.marker_type = 0; - st.marker_data_content_type = MarkerData::MARKER_CONTENT_TYPE_NUMBER; - int rows = 0; - int cols = 0; - double rows_distance = 0; - double cols_distance = 0; - bool column_major = 1; - int first_id = 0; - int marker_id = 0; - std::string s; - std::cout<<"\nPrompt marker placements interactively"< 0) rows=atoi(s.c_str()); - std::cout<<"Array cols : "; std::flush(std::cout); - std::getline(std::cin, s); if (s.length() > 0) cols=atoi(s.c_str()); - std::cout<<"Rows distance : "; std::flush(std::cout); - std::getline(std::cin, s); if (s.length() > 0) rows_distance=atof(s.c_str()); - std::cout<<"Cols distance : "; 
std::flush(std::cout); - std::getline(std::cin, s); if (s.length() > 0) cols_distance=atof(s.c_str()); - std::cout<<"Column major (1 or 0) : "; std::flush(std::cout); - std::getline(std::cin, s); if (s.length() > 0) column_major=atoi(s.c_str()); - std::cout<<"First marker ID : "; std::flush(std::cout); - std::getline(std::cin, s); if (s.length() > 0) first_id=atoi(s.c_str()); - if(!column_major) { - for(int row = 0; row 0) + rows = atoi(s.c_str()); + std::cout << "Array cols : "; + std::flush(std::cout); + std::getline(std::cin, s); + if (s.length() > 0) + cols = atoi(s.c_str()); + std::cout << "Rows distance : "; + std::flush(std::cout); + std::getline(std::cin, s); + if (s.length() > 0) + rows_distance = atof(s.c_str()); + std::cout << "Cols distance : "; + std::flush(std::cout); + std::getline(std::cin, s); + if (s.length() > 0) + cols_distance = atof(s.c_str()); + std::cout << "Column major (1 or 0) : "; + std::flush(std::cout); + std::getline(std::cin, s); + if (s.length() > 0) + column_major = atoi(s.c_str()); + std::cout << "First marker ID : "; + std::flush(std::cout); + std::getline(std::cin, s); + if (s.length() > 0) + first_id = atoi(s.c_str()); + if (!column_major) + { + for (int row = 0; row < rows; row++) + for (int col = 0; col < cols; col++) + { + st.posy = row * rows_distance; + st.posx = col * cols_distance; + marker_id = first_id + row * cols + col; + std::stringstream ss; + ss << marker_id; + st.AddMarker(ss.str().c_str()); + } + } + else + { + for (int col = 0; col < cols; col++) + for (int row = 0; row < rows; row++) + { + st.posy = row * rows_distance; + st.posx = col * cols_distance; + marker_id = first_id + col * rows + row; + std::stringstream ss; + ss << marker_id; + st.AddMarker(ss.str().c_str()); + } + } } + st.Save(); + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) + { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleMarkerDetector.cpp b/ar_track_alvar/src/SampleMarkerDetector.cpp index 64297aa..961925e 100644 --- a/ar_track_alvar/src/SampleMarkerDetector.cpp +++ b/ar_track_alvar/src/SampleMarkerDetector.cpp @@ -5,195 +5,255 @@ using namespace alvar; using namespace std; -bool init=true; -const int marker_size=15; +bool init = true; +const int marker_size = 15; Camera cam; Drawable d[32]; std::stringstream calibrationFilename; -void videocallback(IplImage *image) +void videocallback(IplImage* image) { - static IplImage *rgba; - bool flip_image = (image->origin?true:false); - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; - } + static IplImage* rgba; + bool flip_image = (image->origin ? 
true : false); + if (flip_image) + { + cvFlip(image); + image->origin = !image->origin; + } - if (init) { - init = false; - cout<<"Loading calibration: "<<calibrationFilename.str(); - if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) { - cout<<" [Ok]"<<endl; - } else { - cam.SetRes(image->width, image->height); - cout<<" [Fail]"<<endl; - } - double p[16]; - cam.GetOpenglProjectionMatrix(p,image->width,image->height); - GlutViewer::SetGlProjectionMatrix(p); - for (int i=0; i<32; i++) { - d[i].SetScale(marker_size); - } - rgba = CvTestbed::Instance().CreateImageWithProto("RGBA", image, 0, 4); + if (init) + { + init = false; + cout << "Loading calibration: " << calibrationFilename.str(); + if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, + image->height)) + { + cout << " [Ok]" << endl; } - static MarkerDetector<MarkerData> marker_detector; - marker_detector.SetMarkerSize(marker_size); // for marker ids larger than 255, set the content resolution accordingly - //static MarkerDetector<MarkerArtoolkit> marker_detector; - //marker_detector.SetMarkerSize(2.8, 3, 1.5); - - // Here we try to use RGBA just to make sure that also it works... - //cvCvtColor(image, rgba, CV_RGB2RGBA); - marker_detector.Detect(image, &cam, true, true); - GlutViewer::DrawableClear(); - for (size_t i=0; i<marker_detector.markers->size(); i++) { - if (i >= 32) break; - - Pose p = (*(marker_detector.markers))[i].pose; - p.GetMatrixGL(d[i].gl_mat); - - int id = (*(marker_detector.markers))[i].GetId(); - double r = 1.0 - double(id+1)/32.0; - double g = 1.0 - double(id*3%32+1)/32.0; - double b = 1.0 - double(id*7%32+1)/32.0; - d[i].SetColor(r, g, b); - - GlutViewer::DrawableAdd(&(d[i])); + else + { + cam.SetRes(image->width, image->height); + cout << " [Fail]" << endl; } - - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; + double p[16]; + cam.GetOpenglProjectionMatrix(p, image->width, image->height); + GlutViewer::SetGlProjectionMatrix(p); + for (int i = 0; i < 32; i++) + { + d[i].SetScale(marker_size); } + rgba = CvTestbed::Instance().CreateImageWithProto("RGBA", image, 0, 4); + } + static MarkerDetector<MarkerData> marker_detector; + marker_detector.SetMarkerSize(marker_size); // for marker ids larger than + // 255, set the content + // resolution accordingly + // static MarkerDetector<MarkerArtoolkit> marker_detector; + // marker_detector.SetMarkerSize(2.8, 3, 1.5); + + // Here we try to use RGBA just to make sure that also it works... + // cvCvtColor(image, rgba, CV_RGB2RGBA); + marker_detector.Detect(image, &cam, true, true); + GlutViewer::DrawableClear(); + for (size_t i = 0; i < marker_detector.markers->size(); i++) + { + if (i >= 32) + break; + + Pose p = (*(marker_detector.markers))[i].pose; + p.GetMatrixGL(d[i].gl_mat); + + int id = (*(marker_detector.markers))[i].GetId(); + double r = 1.0 - double(id + 1) / 32.0; + double g = 1.0 - double(id * 3 % 32 + 1) / 32.0; + double b = 1.0 - double(id * 7 % 32 + 1) / 32.0; + d[i].SetColor(r, g, b); + + GlutViewer::DrawableAdd(&(d[i])); + } + + if (flip_image) + { + cvFlip(image); + image->origin = !image->origin; + } } -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SampleMarkerDetector" << std::endl; - std::cout << "====================" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This is an example of how to detect 'MarkerData' markers using" << std::endl; - std::cout << " 'MarkerDetector' and visualize them using 'GlutViewer'.
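`Pose::GetMatrixGL` above fills a 16-double array that `GlutViewer` hands to OpenGL, so the layout should be column-major. Assuming that standard GL convention (the patch does not spell it out), the pose decomposes as:

```
double gl[16];
pose.GetMatrixGL(gl);  // column-major 4x4, the layout glLoadMatrixd expects
// Columns 0-2 hold the rotation; the fourth column holds the translation:
double tx = gl[12], ty = gl[13], tz = gl[14];
```

The colour mixing that follows, `1.0 - double(id * k % 32 + 1) / 32.0` for k in {1, 3, 7}, is just a cheap way to give each of the 32 drawables a stable, distinct colour.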
In the" << std::endl; - std::cout << " SampleMarkerDetector window, various debug information is shown" << std::endl; - std::cout << " about the detected markers. The coordinate axes and a virtual cube" << std::endl; - std::cout << " are superimposed onto the markers to visualize the detected pose." << std::endl; - std::cout << " For each marker, a small image of the marker content is displayed" << std::endl; - std::cout << " at the origin and the marker number is displayed at one of the" << std::endl; - std::cout << " corners. At the opposing corner, the error estimation percentages" << std::endl; - std::cout << " 'MARGIN_ERROR' and 'DECODE_ERROR' (red) or 'TRACK_ERROR' (dark red)" << std::endl; - std::cout << " are displayed." << std::endl; - std::cout << std::endl; - std::cout << " In the AR window, squares are drawn over the marker positions using" << std::endl; - std::cout << " OpenGL. In the VR window, the squares are drawn with respect to the" << std::endl; - std::cout << " camera coordinate frame. The viewpoint can be modified by dragging" << std::endl; - std::cout << " with the left and right mouse buttons." << std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " [device|filename]" << std::endl; - std::cout << std::endl; - std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl; - std::cout << " highgui capture devices are prefered" << std::endl; - std::cout << " filename string specifying a media file as input" << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; - - // Initialise GlutViewer and CvTestbed - GlutViewer::Start(argc, argv, 640, 480); - CvTestbed::Instance().SetVideoCallback(videocallback); - - // Create capture object from camera (argv[1] is a number) or from file (argv[1] is a string) - Capture *cap; - std::string uniqueName; - if ((argc > 1) && (!isdigit(argv[1][0]))) { - // Manually create capture device and initialize capture object - CaptureDevice device("file", argv[1]); - cap = CaptureFactory::instance()->createCapture(device); - uniqueName = "file"; - } - else { - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." << std::endl; - return 0; - } - - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; - - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." 
<< std::endl; - return 0; - } - - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - uniqueName = devices[selectedDevice].uniqueName(); - } - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleMarkerDetector (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." << std::endl; - } + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleMarkerDetector" << std::endl; + std::cout << "====================" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to detect 'MarkerData' markers " + "using" + << std::endl; + std::cout << " 'MarkerDetector' and visualize them using 'GlutViewer'. In " + "the" + << std::endl; + std::cout << " SampleMarkerDetector window, various debug information is " + "shown" + << std::endl; + std::cout << " about the detected markers. The coordinate axes and a " + "virtual cube" + << std::endl; + std::cout << " are superimposed onto the markers to visualize the " + "detected pose." + << std::endl; + std::cout << " For each marker, a small image of the marker content is " + "displayed" + << std::endl; + std::cout << " at the origin and the marker number is displayed at one of " + "the" + << std::endl; + std::cout << " corners. At the opposing corner, the error estimation " + "percentages" + << std::endl; + std::cout << " 'MARGIN_ERROR' and 'DECODE_ERROR' (red) or 'TRACK_ERROR' " + "(dark red)" + << std::endl; + std::cout << " are displayed." << std::endl; + std::cout << std::endl; + std::cout << " In the AR window, squares are drawn over the marker " + "positions using" + << std::endl; + std::cout << " OpenGL. In the VR window, the squares are drawn with " + "respect to the" + << std::endl; + std::cout << " camera coordinate frame. The viewpoint can be modified by " + "dragging" + << std::endl; + std::cout << " with the left and right mouse buttons." 
<< std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << " [device|filename]" << std::endl; + std::cout << std::endl; + std::cout << " device integer selecting device from enumeration list " + "(default 0)" + << std::endl; + std::cout << " highgui capture devices are prefered" + << std::endl; + std::cout << " filename string specifying a media file as input" + << std::endl; + std::cout << std::endl; + std::cout << "Keyboard Shortcuts:" << std::endl; + std::cout << " q: quit" << std::endl; + std::cout << std::endl; + + // Initialise GlutViewer and CvTestbed + GlutViewer::Start(argc, argv, 640, 480); + CvTestbed::Instance().SetVideoCallback(videocallback); + + // Create capture object from camera (argv[1] is a number) or from file + // (argv[1] is a string) + Capture* cap; + std::string uniqueName; + if ((argc > 1) && (!isdigit(argv[1][0]))) + { + // Manually create capture device and initialize capture object + CaptureDevice device("file", argv[1]); + cap = CaptureFactory::instance()->createCapture(device); + uniqueName = "file"; + } + else + { + // Enumerate possible capture plugins + CaptureFactory::CapturePluginVector plugins = + CaptureFactory::instance()->enumeratePlugins(); + if (plugins.size() < 1) + { + std::cout << "Could not find any capture plugins." << std::endl; + return 0; + } + + // Display capture plugins + std::cout << "Available Plugins: "; + outputEnumeratedPlugins(plugins); + std::cout << std::endl; + // Enumerate possible capture devices + CaptureFactory::CaptureDeviceVector devices = + CaptureFactory::instance()->enumerateDevices(); + if (devices.size() < 1) + { + std::cout << "Could not find any capture devices." << std::endl; return 0; + } + + // Check command line argument for which device to use + int selectedDevice = defaultDevice(devices); + if (argc > 1) + { + selectedDevice = atoi(argv[1]); + } + if (selectedDevice >= (int)devices.size()) + { + selectedDevice = defaultDevice(devices); + } + + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; + + // Create capture object from camera + cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); + uniqueName = devices[selectedDevice].uniqueName(); + } + + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; + + cap->start(); + cap->setResolution(640, 480); + + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleMarkerDetector (" << cap->captureDevice().captureType() + << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." 
+ << std::endl;
+ }
+
+ return 0;
+ }
+ catch (const std::exception& e)
+ {
+ std::cout << "Exception: " << e.what() << endl;
+ }
+ catch (...)
+ {
+ std::cout << "Exception: unknown" << std::endl;
+ }
}
diff --git a/ar_track_alvar/src/SampleMarkerHide.cpp b/ar_track_alvar/src/SampleMarkerHide.cpp
index e833af4..8f5cd55 100644
--- a/ar_track_alvar/src/SampleMarkerHide.cpp
+++ b/ar_track_alvar/src/SampleMarkerHide.cpp
@@ -5,223 +5,269 @@
using namespace alvar;
using namespace std;

-#define GLUT_DISABLE_ATEXIT_HACK // Needed to compile with Mingw?
+#define GLUT_DISABLE_ATEXIT_HACK  // Needed to compile with Mingw?
#include <GL/gl.h>

const double margin = 1.0;
std::stringstream calibrationFilename;

// Own drawable for showing hide-texture in OpenGL
-struct OwnDrawable : public Drawable {
- unsigned char hidingtex[64*64*4];
- virtual void Draw() {
- glPushMatrix();
- glMultMatrixd(gl_mat);
-
- glPushAttrib(GL_ALL_ATTRIB_BITS);
- glEnable(GL_TEXTURE_2D);
- int tex=0;
- glBindTexture(GL_TEXTURE_2D, tex);
- glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
- glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,64,64,0,GL_RGBA,GL_UNSIGNED_BYTE,hidingtex);
- glDisable(GL_CULL_FACE);
- glDisable(GL_LIGHTING);
- glDisable(GL_DEPTH_TEST);
- glEnable(GL_ALPHA_TEST);
- glEnable(GL_BLEND);
- glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
- glBegin(GL_QUADS);
- glTexCoord2d(0.0,0.0);
- glVertex3d(-margin,-margin,0);
- glTexCoord2d(0.0,1.0);
- glVertex3d(-margin,margin,0);
- glTexCoord2d(1.0,1.0);
- glVertex3d(margin,margin,0);
- glTexCoord2d(1.0,0.0);
- glVertex3d(margin,-margin,0);
- glEnd();
- glPopAttrib();
- glPopMatrix();
- }
+struct OwnDrawable : public Drawable
+{
+ unsigned char hidingtex[64 * 64 * 4];
+ virtual void Draw()
+ {
+ glPushMatrix();
+ glMultMatrixd(gl_mat);
+
+ glPushAttrib(GL_ALL_ATTRIB_BITS);
+ glEnable(GL_TEXTURE_2D);
+ int tex = 0;
+ glBindTexture(GL_TEXTURE_2D, tex);
+ glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 64, 64, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, hidingtex);
+ glDisable(GL_CULL_FACE);
+ glDisable(GL_LIGHTING);
+ glDisable(GL_DEPTH_TEST);
+ glEnable(GL_ALPHA_TEST);
+ glEnable(GL_BLEND);
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+ glBegin(GL_QUADS);
+ glTexCoord2d(0.0, 0.0);
+ glVertex3d(-margin, -margin, 0);
+ glTexCoord2d(0.0, 1.0);
+ glVertex3d(-margin, margin, 0);
+ glTexCoord2d(1.0, 1.0);
+ glVertex3d(margin, margin, 0);
+ glTexCoord2d(1.0, 0.0);
+ glVertex3d(margin, -margin, 0);
+ glEnd();
+ glPopAttrib();
+ glPopMatrix();
+ }
};
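+// OwnDrawable::Draw() pastes the 64x64 RGBA hide-texture over the marker
+// quad with depth testing, lighting and face culling disabled, so the
+// generated patch always covers the marker pixels in the AR view.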
-void videocallback(IplImage *image)
+void videocallback(IplImage* image)
{
- static bool init=true;
- static const int marker_size=15;
- static Camera cam;
- static OwnDrawable d[32];
- static IplImage *hide_texture;
-
- bool flip_image = (image->origin?true:false);
- if (flip_image) {
- cvFlip(image);
- image->origin = !image->origin;
- }
-
- static IplImage* bg_image = 0;
- if(!bg_image) bg_image = cvCreateImage(cvSize(512, 512), 8, 3);
- if(image->nChannels == 3)
- {
- bg_image->origin = 0;
- cvResize(image, bg_image);
- GlutViewer::SetVideo(bg_image);
- }
-
- if (init) {
- init = false;
- cout<<"Loading calibration: "<<calibrationFilename.str();
- if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
- cout<<" [Ok]"<<endl;
- } else {
- cam.SetRes(image->width, image->height);
- cout<<" [Fail]"<<endl;
- }
- double p[16];
- cam.GetOpenglProjectionMatrix(p,image->width,image->height);
- GlutViewer::SetGlProjectionMatrix(p);
- hide_texture = CvTestbed::Instance().CreateImage("hide_texture", cvSize(64, 64), 8, 4);
- }
- static MarkerDetector<MarkerData> marker_detector;\
- marker_detector.Detect(image, &cam, false, false);
-
- GlutViewer::DrawableClear();
- for (size_t i=0; i<marker_detector.markers->size(); i++) {
- if (i >= 32) break;
- GlutViewer::DrawableAdd(&(d[i]));
- }
- for (size_t i=0; i<marker_detector.markers->size(); i++) {
- if (i >= 32) break;
-
- // Note that we need to mirror both the y- and z-axis because:
- // - In OpenCV we have coordinates: x-right, y-down, z-ahead
- // - In OpenGL we have coordinates: x-right, y-up, z-backwards
- // TODO: Better option might be to use OpenGL projection matrix that matches our OpenCV-approach
- Pose p = (*(marker_detector.markers))[i].pose;
- BuildHideTexture(image, hide_texture, &cam, d[i].gl_mat, PointDouble(-margin, -margin), PointDouble(margin, margin));
- //DrawTexture(image, hide_texture, &cam, d[i].gl_mat, PointDouble(-0.7, -0.7), PointDouble(0.7, 0.7));
-
- p.GetMatrixGL(d[i].gl_mat);
- for (int ii=0; ii<64*64; ii++) {
- d[i].hidingtex[ii*4+0] = hide_texture->imageData[ii*4+2];
- d[i].hidingtex[ii*4+1] = hide_texture->imageData[ii*4+1];
- d[i].hidingtex[ii*4+2] = hide_texture->imageData[ii*4+0];
- d[i].hidingtex[ii*4+3] = hide_texture->imageData[ii*4+3];
- }
- }
- if (flip_image) {
- cvFlip(image);
- image->origin = !image->origin;
- }
-}
+ static bool init = true;
+ static const int marker_size = 15;
+ static Camera cam;
+ static OwnDrawable d[32];
+ static IplImage* hide_texture;
+
+ bool flip_image = (image->origin ? true : false);
+ if (flip_image)
+ {
+ cvFlip(image);
+ image->origin = !image->origin;
+ }
+
+ static IplImage* bg_image = 0;
+ if (!bg_image)
+ bg_image = cvCreateImage(cvSize(512, 512), 8, 3);
+ if (image->nChannels == 3)
+ {
+ bg_image->origin = 0;
+ cvResize(image, bg_image);
+ GlutViewer::SetVideo(bg_image);
+ }
+
+ if (init)
+ {
+ init = false;
+ cout << "Loading calibration: " << calibrationFilename.str();
+ if (cam.SetCalib(calibrationFilename.str().c_str(), image->width,
+ image->height))
+ {
+ cout << " [Ok]" << endl;
+ }
+ else
+ {
+ cam.SetRes(image->width, image->height);
+ cout << " [Fail]" << endl;
+ }
+ double p[16];
+ cam.GetOpenglProjectionMatrix(p, image->width, image->height);
+ GlutViewer::SetGlProjectionMatrix(p);
+ hide_texture =
+ CvTestbed::Instance().CreateImage("hide_texture", cvSize(64, 64), 8, 4);
+ }
+ static MarkerDetector<MarkerData> marker_detector;
+ marker_detector.Detect(image, &cam, false, false);
+
+ GlutViewer::DrawableClear();
+ for (size_t i = 0; i < marker_detector.markers->size(); i++)
+ {
+ if (i >= 32)
+ break;
+ GlutViewer::DrawableAdd(&(d[i]));
+ }
+ for (size_t i = 0; i < marker_detector.markers->size(); i++)
+ {
+ if (i >= 32)
+ break;
+
+ // Note that we need to mirror both the y- and z-axis because:
+ // - In OpenCV we have coordinates: x-right, y-down, z-ahead
+ // - In OpenGL we have coordinates: x-right, y-up, z-backwards
+ // TODO: Better option might be to use OpenGL projection matrix that
+ // matches our OpenCV-approach
+ Pose p = (*(marker_detector.markers))[i].pose;
+ BuildHideTexture(image, hide_texture, &cam, d[i].gl_mat,
+ PointDouble(-margin, -margin),
+ PointDouble(margin, margin));
+ // DrawTexture(image, hide_texture, &cam, d[i].gl_mat, PointDouble(-0.7,
+ // -0.7), PointDouble(0.7, 0.7));
+
+ p.GetMatrixGL(d[i].gl_mat);
+ for (int ii = 0; ii < 64 * 64; ii++)
+ {
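+ // The texture bytes come from an OpenCV image, so red and blue are
+ // swapped here to turn BGRA byte order into the RGBA layout that
+ // OwnDrawable uploads via glTexImage2D.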
+ d[i].hidingtex[ii * 4 + 0] = hide_texture->imageData[ii * 4 + 2];
+ d[i].hidingtex[ii * 4 + 1] = hide_texture->imageData[ii * 4 + 1];
+ d[i].hidingtex[ii * 4 + 2] = hide_texture->imageData[ii * 4 + 0];
+ d[i].hidingtex[ii * 4 + 3] = hide_texture->imageData[ii * 4 + 3];
+ }
+ }
+
+ if (flip_image)
+ {
+ cvFlip(image);
+ image->origin = !image->origin;
+ }
+}
+
+int main(int argc, char* argv[])
+{
+ try
+ {
+ // Output usage message
+ std::string filename(argv[0]);
+ filename = filename.substr(filename.find_last_of('\\') + 1);
+ std::cout << "SampleMarkerHide" << std::endl;
+ std::cout << "================" << std::endl;
+ std::cout << std::endl;
+ std::cout << "Description:" << std::endl;
+ std::cout << " This is an example of how to detect 'MarkerData' markers, "
+ "similarly"
+ << std::endl;
+ std::cout << " to 'SampleMarkerDetector', and hide them using the "
+ "'BuildHideTexture'"
+ << std::endl;
+ std::cout << " and 'DrawTexture' classes." << std::endl;
+ std::cout << std::endl;
+ std::cout << "Usage:" << std::endl;
+ std::cout << " " << filename << " [device]" << std::endl;
+ std::cout << std::endl;
+ std::cout << " device integer selecting device from enumeration list "
+ "(default 0)"
+ << std::endl;
+ std::cout << " highgui capture devices are prefered"
+ << std::endl;
+ std::cout << std::endl;
+ std::cout << "Keyboard Shortcuts:" << std::endl;
+ std::cout << " q: quit" << std::endl;
+ std::cout << std::endl;
+
+ // Initialise GlutViewer and CvTestbed
+ GlutViewer::Start(argc, argv, 640, 480, 15);
+ CvTestbed::Instance().SetVideoCallback(videocallback);
+
+ // Enumerate possible capture plugins
+ CaptureFactory::CapturePluginVector plugins =
+ CaptureFactory::instance()->enumeratePlugins();
+ if (plugins.size() < 1)
+ {
+ std::cout << "Could not find any capture plugins." << std::endl;
+ return 0;
+ }
+
+ // Display capture plugins
+ std::cout << "Available Plugins: ";
+ outputEnumeratedPlugins(plugins);
+ std::cout << std::endl;
+
+ // Enumerate possible capture devices
+ CaptureFactory::CaptureDeviceVector devices =
+ CaptureFactory::instance()->enumerateDevices();
+ if (devices.size() < 1)
+ {
+ std::cout << "Could not find any capture devices." << std::endl;
+ return 0;
+ }
+
+ // Check command line argument for which device to use
+ int selectedDevice = defaultDevice(devices);
+ if (argc > 1)
+ {
+ selectedDevice = atoi(argv[1]);
+ }
+ if (selectedDevice >= (int)devices.size())
+ {
+ selectedDevice = defaultDevice(devices);
+ }
-int main(int argc, char *argv[])
-{
- try {
- // Output usage message
- std::string filename(argv[0]);
- filename = filename.substr(filename.find_last_of('\\') + 1);
- std::cout << "SampleMarkerHide" << std::endl;
- std::cout << "================" << std::endl;
- std::cout << std::endl;
- std::cout << "Description:" << std::endl;
- std::cout << " This is an example of how to detect 'MarkerData' markers, similarly" << std::endl;
- std::cout << " to 'SampleMarkerDetector', and hide them using the 
'BuildHideTexture'" << std::endl; - std::cout << " and 'DrawTexture' classes." << std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " [device]" << std::endl; - std::cout << std::endl; - std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl; - std::cout << " highgui capture devices are prefered" << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; - - // Initialise GlutViewer and CvTestbed - GlutViewer::Start(argc, argv, 640, 480, 15); - CvTestbed::Instance().SetVideoCallback(videocallback); - - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." << std::endl; - return 0; - } - - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; - - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." << std::endl; - return 0; - } - - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleMarkerHide (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." 
<< std::endl; - } - - return 0; + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; + + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); + + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; + + cap->start(); + cap->setResolution(640, 480); + + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleMarkerHide (" << cap->captureDevice().captureType() + << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) + { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleMarkerlessCreator.cpp b/ar_track_alvar/src/SampleMarkerlessCreator.cpp index fdbb5c9..1040c8c 100644 --- a/ar_track_alvar/src/SampleMarkerlessCreator.cpp +++ b/ar_track_alvar/src/SampleMarkerlessCreator.cpp @@ -2,52 +2,66 @@ using namespace std; using namespace alvar; -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SampleMarkerlessCreator" << std::endl; - std::cout << "=======================" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This is an example of how to use the 'FernImageDetector' class" << std::endl; - std::cout << " to train a Fern classifier for markerless image-based tracking." << std::endl; - std::cout << " The image should contain many unique features and be in the range" << std::endl; - std::cout << " of 200x200 to 500x500 pixels. A '.dat' file will be saved in the" << std::endl; - std::cout << " same directory as the image and can be used with the" << std::endl; - std::cout << " SampleMarkerlessDetector sample." 
<< std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " filename" << std::endl; - std::cout << std::endl; - std::cout << " filename filename of image to train" << std::endl; - std::cout << std::endl; + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleMarkerlessCreator" << std::endl; + std::cout << "=======================" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to use the 'FernImageDetector' " + "class" + << std::endl; + std::cout << " to train a Fern classifier for markerless image-based " + "tracking." + << std::endl; + std::cout << " The image should contain many unique features and be in " + "the range" + << std::endl; + std::cout << " of 200x200 to 500x500 pixels. A '.dat' file will be saved " + "in the" + << std::endl; + std::cout << " same directory as the image and can be used with the" + << std::endl; + std::cout << " SampleMarkerlessDetector sample." << std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << " filename" << std::endl; + std::cout << std::endl; + std::cout << " filename filename of image to train" << std::endl; + std::cout << std::endl; - if (argc < 2) { - std::cout << "Filename not specified." << std::endl; - return 0; - } - - std::cout << "Training classifier." << std::endl; - FernImageDetector fernDetector(true); - std::string imageFilename(argv[1]); - fernDetector.train(imageFilename); + if (argc < 2) + { + std::cout << "Filename not specified." << std::endl; + return 0; + } - std::cout << "Writing classifier." << std::endl; - std::string classifierFilename = imageFilename + ".dat"; - if (!fernDetector.write(classifierFilename)) { - std::cout << "Writing classifier failed." << std::endl; - return 1; - } + std::cout << "Training classifier." << std::endl; + FernImageDetector fernDetector(true); + std::string imageFilename(argv[1]); + fernDetector.train(imageFilename); - return 0; - } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; - } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + std::cout << "Writing classifier." << std::endl; + std::string classifierFilename = imageFilename + ".dat"; + if (!fernDetector.write(classifierFilename)) + { + std::cout << "Writing classifier failed." << std::endl; + return 1; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) + { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleMarkerlessDetector.cpp b/ar_track_alvar/src/SampleMarkerlessDetector.cpp index 1c125f2..dcf4ce3 100644 --- a/ar_track_alvar/src/SampleMarkerlessDetector.cpp +++ b/ar_track_alvar/src/SampleMarkerlessDetector.cpp @@ -15,185 +15,235 @@ Drawable d; cv::Mat gray; bool reset = false; -void videocallback(IplImage *image) +void videocallback(IplImage* image) { - bool flip_image = (image->origin?true:false); - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; + bool flip_image = (image->origin ? 
true : false);
+ if (flip_image)
+ {
+ cvFlip(image);
+ image->origin = !image->origin;
+ }
-
- if (init) {
- init = false;
- cout << "Loading calibration: " << calibrationFilename.str();
- if (fernEstimator.setCalibration(calibrationFilename.str(), image->width, image->height)) {
- cout << " [Ok]" << endl;
- } else {
- fernEstimator.setResolution(image->width, image->height);
- cout << " [Fail]" << endl;
- }
- double p[16];
- fernEstimator.camera().GetOpenglProjectionMatrix(p, image->width, image->height);
- GlutViewer::SetGlProjectionMatrix(p);
- d.SetScale(10);
- gray = cv::Mat(image);
- }
-
- if (image->nChannels == 3) {
- cv::Mat img = cvarrToMat(image);
- cv::cvtColor(img, gray, CV_RGB2GRAY);
- }
- else {
- gray = image;
- }
-
- vector<CvPoint2D64f> ipts;
- vector<CvPoint3D64f> mpts;
-
- fernDetector.findFeatures(gray, true);
- fernDetector.imagePoints(ipts);
- fernDetector.modelPoints(mpts, true);
- double test = fernDetector.inlierRatio();
- if (test > 0.15 && mpts.size() > 4) {
- fernEstimator.calculateFromPointCorrespondences(mpts, ipts);
- }
-
- GlutViewer::DrawableClear();
- Pose pose = fernEstimator.pose();
- pose.GetMatrixGL(d.gl_mat);
- GlutViewer::DrawableAdd(&d);
-
- if (flip_image) {
- cvFlip(image);
- image->origin = !image->origin;
- }
-}
+
+ if (init)
+ {
+ init = false;
+ cout << "Loading calibration: " << calibrationFilename.str();
+ if (fernEstimator.setCalibration(calibrationFilename.str(), image->width,
+ image->height))
+ {
+ cout << " [Ok]" << endl;
+ }
+ else
+ {
+ fernEstimator.setResolution(image->width, image->height);
+ cout << " [Fail]" << endl;
+ }
+ double p[16];
+ fernEstimator.camera().GetOpenglProjectionMatrix(p, image->width,
+ image->height);
+ GlutViewer::SetGlProjectionMatrix(p);
+ d.SetScale(10);
+ gray = cv::Mat(image);
+ }
+
+ if (image->nChannels == 3)
+ {
+ cv::Mat img = cvarrToMat(image);
+ cv::cvtColor(img, gray, CV_RGB2GRAY);
+ }
+ else
+ {
+ gray = image;
+ }
+
+ vector<CvPoint2D64f> ipts;
+ vector<CvPoint3D64f> mpts;
+
+ fernDetector.findFeatures(gray, true);
+ fernDetector.imagePoints(ipts);
+ fernDetector.modelPoints(mpts, true);
+ double test = fernDetector.inlierRatio();
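+ // Only re-estimate the pose when the inlier ratio stays above 0.15 and
+ // more than four model points were matched; otherwise the previous pose
+ // estimate is kept.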
+ if (test > 0.15 && mpts.size() > 4)
+ {
+ fernEstimator.calculateFromPointCorrespondences(mpts, ipts);
+ }
+
+ GlutViewer::DrawableClear();
+ Pose pose = fernEstimator.pose();
+ pose.GetMatrixGL(d.gl_mat);
+ GlutViewer::DrawableAdd(&d);
+
+ if (flip_image)
+ {
+ cvFlip(image);
+ image->origin = !image->origin;
+ }
+}

+int main(int argc, char* argv[])
+{
+ try
+ {
+ // Output usage message
+ std::string filename(argv[0]);
+ filename = filename.substr(filename.find_last_of('\\') + 1);
+ std::cout << "SampleMarkerlessDetector" << std::endl;
+ std::cout << "========================" << std::endl;
+ std::cout << std::endl;
+ std::cout << "Description:" << std::endl;
+ std::cout << " This is an example of how to use the 'FernImageDetector' "
+ "and"
+ << std::endl;
+ std::cout << " 'FernPoseEstimator' classes to detect and track an image "
+ "and"
+ << std::endl;
+ std::cout << " visualize it using 'GlutViewer'. The classification must "
+ "first"
+ << std::endl;
+ std::cout << " be trained with the SampleMarkerlessCreator sample and the"
+ << std::endl;
+ std::cout << " resulting file passed as an argument to this sample."
+ << std::endl;
+ std::cout << std::endl;
+ std::cout << " For optimal results, a high quality USB camera or a "
+ "Firewire"
+ << std::endl;
+ std::cout << " camera is necessary. It is also advised to calibrate the "
+ "camera"
+ << std::endl;
+ std::cout << " using the SampleCamCalib sample. It should be noted that "
+ "the size"
+ << std::endl;
+ std::cout << " of the trained image will affect the optimal distance for "
+ "detection."
+ << std::endl;
+ std::cout << std::endl;
+ std::cout << "Usage:" << std::endl;
+ std::cout << " " << filename << " filename [device]" << std::endl;
+ std::cout << std::endl;
+ std::cout << " filename the filename of classifier (.dat)" << std::endl;
+ std::cout << " device integer selecting device from enumeration list "
+ "(default 0)"
+ << std::endl;
+ std::cout << " highgui capture devices are prefered"
+ << std::endl;
+ std::cout << std::endl;
+ std::cout << "Keyboard Shortcuts:" << std::endl;
+ std::cout << " q: quit" << std::endl;
+ std::cout << std::endl;
+
+ if (argc < 2)
+ {
+ std::cout << "Filename not specified." << std::endl;
+ return 0;
+ }
+ std::string classifierFilename(argv[1]);
+
+ // Initialise GlutViewer and CvTestbed
+ GlutViewer::Start(argc, argv, 640, 480);
+ CvTestbed::Instance().SetVideoCallback(videocallback);
+
+ // Enumerate possible capture plugins
+ CaptureFactory::CapturePluginVector plugins =
+ CaptureFactory::instance()->enumeratePlugins();
+ if (plugins.size() < 1)
+ {
+ std::cout << "Could not find any capture plugins." << std::endl;
+ return 0;
+ }
+
+ // Display capture plugins
+ std::cout << "Available Plugins: ";
+ outputEnumeratedPlugins(plugins);
+ std::cout << std::endl;
+
+ // Enumerate possible capture devices
+ CaptureFactory::CaptureDeviceVector devices =
+ CaptureFactory::instance()->enumerateDevices();
+ if (devices.size() < 1)
+ {
+ std::cout << "Could not find any capture devices." << std::endl;
+ return 0;
+ }
+
+ // Check command line argument for which device to use
+ int selectedDevice = defaultDevice(devices);
+ if (argc > 2)
+ {
+ selectedDevice = atoi(argv[2]);
+ }
+ if (selectedDevice >= (int)devices.size())
+ {
+ selectedDevice = defaultDevice(devices);
+ }
-int main(int argc, char *argv[])
-{
- try {
- // Output usage message
- std::string filename(argv[0]);
- filename = filename.substr(filename.find_last_of('\\') + 1);
- std::cout << "SampleMarkerlessDetector" << std::endl;
- std::cout << "========================" << std::endl;
- std::cout << std::endl;
- std::cout << "Description:" << std::endl;
- std::cout << " This is an example of how to use the 'FernImageDetector' and" << std::endl;
- std::cout << " 'FernPoseEstimator' classes to detect and track an image and" << std::endl;
- std::cout << " visualize it using 'GlutViewer'. The classification must first" << std::endl;
- std::cout << " be trained with the SampleMarkerlessCreator sample and the" << std::endl;
- std::cout << " resulting file passed as an argument to this sample." << std::endl;
- std::cout << std::endl;
- std::cout << " For optimal results, a high quality USB camera or a Firewire" << std::endl;
- std::cout << " camera is necessary. It is also advised to calibrate the camera" << std::endl;
- std::cout << " using the SampleCamCalib sample. It should be noted that the size" << std::endl;
- std::cout << " of the trained image will affect the optimal distance for detection." 
<< std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " filename [device]" << std::endl; - std::cout << std::endl; - std::cout << " filename the filename of classifier (.dat)" << std::endl; - std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl; - std::cout << " highgui capture devices are prefered" << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; - - if (argc < 2) { - std::cout << "Filename not specified." << std::endl; - return 0; - } - std::string classifierFilename(argv[1]); - - // Initialise GlutViewer and CvTestbed - GlutViewer::Start(argc, argv, 640, 480); - CvTestbed::Instance().SetVideoCallback(videocallback); - - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." << std::endl; - return 0; - } - - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; - - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." << std::endl; - return 0; - } - - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 2) { - selectedDevice = atoi(argv[2]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::cout << "Loading classifier." << std::endl; - if (!fernDetector.read(classifierFilename)) { - std::cout << "Loading classifier failed." << std::endl; - cap->stop(); - delete cap; - return 1; - } - - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleMarkerDetector (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." 
<< std::endl; - } - - return 0; + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; + + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); + + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::cout << "Loading classifier." << std::endl; + if (!fernDetector.read(classifierFilename)) + { + std::cout << "Loading classifier failed." << std::endl; + cap->stop(); + delete cap; + return 1; + } + + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; + + cap->start(); + cap->setResolution(640, 480); + + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleMarkerDetector (" << cap->captureDevice().captureType() + << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) + { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleMultiMarker.cpp b/ar_track_alvar/src/SampleMultiMarker.cpp index 16333a9..12ba859 100644 --- a/ar_track_alvar/src/SampleMultiMarker.cpp +++ b/ar_track_alvar/src/SampleMultiMarker.cpp @@ -6,226 +6,261 @@ using namespace alvar; using namespace std; -int visualize=0; +int visualize = 0; bool detect_additional = false; const int nof_markers = 5; const double marker_size = 4; MarkerDetector marker_detector; -//MultiMarker *multi_marker; -MultiMarker *multi_marker; +// MultiMarker *multi_marker; +MultiMarker* multi_marker; std::stringstream calibrationFilename; -void videocallback(IplImage *image) +void videocallback(IplImage* image) { - static Camera cam; - Pose pose; - bool flip_image = (image->origin?true:false); - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; - } + static Camera cam; + Pose pose; + bool flip_image = (image->origin ? 
true : false);
+ if (flip_image)
+ {
+ cvFlip(image);
+ image->origin = !image->origin;
+ }
- static bool init = true;
- if (init)
- {
- init = false;
-
- // Initialize camera
- cout<<"Loading calibration: "<<calibrationFilename.str();
- if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height))
- {
- cout<<" [Ok]"<<endl;
- }
- else
- {
- cam.SetRes(image->width, image->height);
- cout<<" [Fail]"<<endl;
- }
- vector<int> id_vector;
- for(int i = 0; i < nof_markers; ++i)
- id_vector.push_back(i);
-
- // We make the initialization for MultiMarkerBundle using a fixed marker field (can be printed from ALVAR.pdf)
- marker_detector.SetMarkerSize(marker_size);
- marker_detector.SetMarkerSizeForId(0, marker_size*2);
- multi_marker = new MultiMarker(id_vector);
- pose.Reset();
- multi_marker->PointCloudAdd(0, marker_size*2, pose);
- pose.SetTranslation(-marker_size*2.5, +marker_size*1.5, 0);
- multi_marker->PointCloudAdd(1, marker_size, pose);
- pose.SetTranslation(+marker_size*2.5, +marker_size*1.5, 0);
- multi_marker->PointCloudAdd(2, marker_size, pose);
- pose.SetTranslation(-marker_size*2.5, -marker_size*1.5, 0);
- multi_marker->PointCloudAdd(3, marker_size, pose);
- pose.SetTranslation(+marker_size*2.5, -marker_size*1.5, 0);
- multi_marker->PointCloudAdd(4, marker_size, pose);
- }
+ static bool init = true;
+ if (init)
+ {
+ init = false;
+
+ // Initialize camera
+ cout << "Loading calibration: " << calibrationFilename.str();
+ if (cam.SetCalib(calibrationFilename.str().c_str(), image->width,
+ image->height))
+ {
+ cout << " [Ok]" << endl;
+ }
+ else
+ {
+ cam.SetRes(image->width, image->height);
+ cout << " [Fail]" << endl;
+ }
+ vector<int> id_vector;
+ for (int i = 0; i < nof_markers; ++i)
+ id_vector.push_back(i);
+
+ // We make the initialization for MultiMarkerBundle using a fixed marker
+ // field (can be printed from ALVAR.pdf)
+ marker_detector.SetMarkerSize(marker_size);
+ marker_detector.SetMarkerSizeForId(0, marker_size * 2);
+ multi_marker = new MultiMarker(id_vector);
+ pose.Reset();
+ multi_marker->PointCloudAdd(0, marker_size * 2, pose);
+ pose.SetTranslation(-marker_size * 2.5, +marker_size * 1.5, 0);
+ multi_marker->PointCloudAdd(1, marker_size, pose);
+ pose.SetTranslation(+marker_size * 2.5, +marker_size * 1.5, 0);
+ multi_marker->PointCloudAdd(2, marker_size, pose);
+ pose.SetTranslation(-marker_size * 2.5, -marker_size * 1.5, 0);
+ multi_marker->PointCloudAdd(3, marker_size, pose);
+ pose.SetTranslation(+marker_size * 2.5, -marker_size * 1.5, 0);
+ multi_marker->PointCloudAdd(4, marker_size, pose);
+ }

- double error=-1;
- if (marker_detector.Detect(image, &cam, true, (visualize == 1), 0.0)) {
- if (detect_additional) {
- error = multi_marker->Update(marker_detector.markers, &cam, pose);
- multi_marker->SetTrackMarkers(marker_detector, &cam, pose, visualize ? image : NULL);
- marker_detector.DetectAdditional(image, &cam, (visualize == 1));
- }
+ double error = -1;
+ if (marker_detector.Detect(image, &cam, true, (visualize == 1), 0.0))
+ {
+ if (detect_additional)
+ {
+ error = multi_marker->Update(marker_detector.markers, &cam, pose);
+ multi_marker->SetTrackMarkers(marker_detector, &cam, pose,
+ visualize ? 
image : NULL); + marker_detector.DetectAdditional(image, &cam, (visualize == 1)); } + if (visualize == 2) + error = multi_marker->Update(marker_detector.markers, &cam, pose, image); + else + error = multi_marker->Update(marker_detector.markers, &cam, pose); + } + + static Marker foo; + foo.SetMarkerSize(marker_size * 4); + if ((error >= 0) && (error < 5)) + { + foo.pose = pose; + } + foo.Visualize(image, &cam); + + if (flip_image) + { + cvFlip(image); + image->origin = !image->origin; + } } int keycallback(int key) { - if(key == 'v') + if (key == 'v') + { + visualize = (visualize + 1) % 3; + } + else if (key == 'l') + { + if (multi_marker->Load("mmarker.xml", alvar::FILE_FORMAT_XML)) { - visualize = (visualize+1)%3; + cout << "Multi marker loaded" << endl; } - else if(key == 'l') + else + cout << "Cannot load multi marker" << endl; + } + else if (key == 'd') + { + detect_additional = !detect_additional; + if (detect_additional) + cout << "Unreadable marker detection enabled." << endl; + else + cout << "Unreadable marker detection disabled." << endl; + } + else + return key; + + return 0; +} + +int main(int argc, char* argv[]) +{ + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleMultiMarker" << std::endl; + std::cout << "=================" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to use the 'MultiMarker' class " + "to detect" + << std::endl; + std::cout << " preconfigured multi-marker setup (see ALVAR.pdf). A large " + "cube is drawn" + << std::endl; + std::cout << " over the detected multi-marker even when only some of the " + "markers are" + << std::endl; + std::cout << " visible." 
<< std::endl;
+ std::cout << std::endl;
+ std::cout << "Usage:" << std::endl;
+ std::cout << " " << filename << " [device]" << std::endl;
+ std::cout << std::endl;
+ std::cout << " device integer selecting device from enumeration list "
+ "(default 0)"
+ << std::endl;
+ std::cout << " highgui capture devices are prefered"
+ << std::endl;
+ std::cout << std::endl;
+ std::cout << "Keyboard Shortcuts:" << std::endl;
+ std::cout << " v: switch between three debug visualizations" << std::endl;
+ std::cout << " l: load marker configuration from mmarker.txt" << std::endl;
+ std::cout << " d: toggle the detection of non-readable markers"
+ << std::endl;
+ std::cout << " q: quit" << std::endl;
+ std::cout << std::endl;
+
+ // Initialise CvTestbed
+ CvTestbed::Instance().SetVideoCallback(videocallback);
+ CvTestbed::Instance().SetKeyCallback(keycallback);
+
+ // Enumerate possible capture plugins
+ CaptureFactory::CapturePluginVector plugins =
+ CaptureFactory::instance()->enumeratePlugins();
+ if (plugins.size() < 1)
+ {
+ std::cout << "Could not find any capture plugins." << std::endl;
+ return 0;
+ }
+
+ // Display capture plugins
+ std::cout << "Available Plugins: ";
+ outputEnumeratedPlugins(plugins);
+ std::cout << std::endl;
+
+ // Enumerate possible capture devices
+ CaptureFactory::CaptureDeviceVector devices =
+ CaptureFactory::instance()->enumerateDevices();
+ if (devices.size() < 1)
+ {
+ std::cout << "Could not find any capture devices." << std::endl;
+ return 0;
+ }
+
+ // Check command line argument for which device to use
+ int selectedDevice = defaultDevice(devices);
+ if (argc > 1)
+ {
+ selectedDevice = atoi(argv[1]);
+ }
+ if (selectedDevice >= (int)devices.size())
+ {
+ selectedDevice = defaultDevice(devices);
+ }
- if(multi_marker->Load("mmarker.xml", alvar::FILE_FORMAT_XML))
- {
- cout<<"Multi marker loaded"<<endl;
- }
- else
- cout<<"Cannot load multi marker"<<endl;
- }
- else if(key == 'd')
- {
- detect_additional = !detect_additional;
- if(detect_additional)
- cout<<"Unreadable marker detection enabled."<<endl;
- else
- cout<<"Unreadable marker detection disabled."<<endl;
- }
- else return key;
-
- return 0;
-}
-
-int main(int argc, char *argv[])
-{
- try {
- // Output usage message
- std::string filename(argv[0]);
- filename = filename.substr(filename.find_last_of('\\') + 1);
- std::cout << "SampleMultiMarker" << std::endl;
- std::cout << "=================" << std::endl;
- std::cout << std::endl;
- std::cout << "Description:" << std::endl;
- std::cout << " This is an example of how to use the 'MultiMarker' class to detect" << std::endl;
- std::cout << " preconfigured multi-marker setup (see ALVAR.pdf). A large cube is drawn" << std::endl;
- std::cout << " over the detected multi-marker even when only some of the markers are" << std::endl;
- std::cout << " visible." << std::endl;
- std::cout << std::endl;
- std::cout << "Usage:" << std::endl;
- std::cout << " " << filename << " [device]" << std::endl;
- std::cout << std::endl;
- std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl;
- std::cout << " highgui capture devices are prefered" << std::endl;
- std::cout << std::endl;
- std::cout << "Keyboard Shortcuts:" << std::endl;
- std::cout << " v: switch between three debug visualizations" << std::endl;
- std::cout << " l: load marker configuration from mmarker.txt" << std::endl;
- std::cout << " d: toggle the detection of non-readable markers" << std::endl;
- std::cout << " q: quit" << std::endl;
- std::cout << std::endl;
-
- // Initialise CvTestbed
- CvTestbed::Instance().SetVideoCallback(videocallback);
- CvTestbed::Instance().SetKeyCallback(keycallback);
-
- // Enumerate possible capture plugins
- CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins();
- if (plugins.size() < 1) {
- std::cout << "Could not find any capture plugins." << std::endl;
- return 0;
- }
-
- // Display capture plugins
- std::cout << "Available Plugins: ";
- outputEnumeratedPlugins(plugins);
- std::cout << std::endl;
-
- // Enumerate possible capture devices
- CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices();
- if (devices.size() < 1) {
- std::cout << "Could not find any capture devices." 
<< std::endl; - return 0; - } - - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleMultiMarker (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." << std::endl; - } - - return 0; + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; + + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); + + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; + + cap->start(); + cap->setResolution(640, 480); + + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleMultiMarker (" << cap->captureDevice().captureType() + << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) 
+ {
+ std::cout << "Exception: unknown" << std::endl;
+ }
}
diff --git a/ar_track_alvar/src/SampleMultiMarkerBundle.cpp b/ar_track_alvar/src/SampleMultiMarkerBundle.cpp
index 3500495..5229f09 100644
--- a/ar_track_alvar/src/SampleMultiMarkerBundle.cpp
+++ b/ar_track_alvar/src/SampleMultiMarkerBundle.cpp
@@ -7,306 +7,357 @@
using namespace alvar;
using namespace std;

-int visualize=1;
+int visualize = 1;
const int nof_markers = 8;
const double marker_size = 4;
-bool add_measurement=false;
+bool add_measurement = false;
bool optimize = false;
bool optimize_done = false;
MarkerDetector<MarkerData> marker_detector;
std::stringstream calibrationFilename;
-MultiMarkerInitializer *multi_marker_init=NULL;
-MultiMarkerBundle *multi_marker_bundle=NULL;
-double GetMultiMarkerPose(IplImage *image, Camera *cam, Pose &pose) {
- static bool init=true;
- if (init) {
- cout<<"Using manual multimarker approach with MultiMarkerInitializer & MultiMarkerBundle."<<endl;
- cout<<"Use 'p' for taking keyframes and 'o' for optimizing."<<endl;
- init=false;
- vector<int> id_vector;
- for(int i = 0; i < nof_markers; ++i)
- id_vector.push_back(i);
- // We make the initialization for MultiMarkerBundle using MultiMarkerInitializer
- // Each marker needs to be visible in at least two images and at most 64 image are used.
- multi_marker_init = new MultiMarkerInitializer(id_vector, 2, 64);
- pose.Reset();
- multi_marker_init->PointCloudAdd(id_vector[0], marker_size, pose);
- multi_marker_bundle = new MultiMarkerBundle(id_vector);
- }
+MultiMarkerInitializer* multi_marker_init = NULL;
+MultiMarkerBundle* multi_marker_bundle = NULL;
+double GetMultiMarkerPose(IplImage* image, Camera* cam, Pose& pose)
+{
+ static bool init = true;
+ if (init)
+ {
+ cout << "Using manual multimarker approach with MultiMarkerInitializer & "
+ "MultiMarkerBundle."
+ << endl;
+ cout << "Use 'p' for taking keyframes and 'o' for optimizing." << endl;
+ init = false;
+ vector<int> id_vector;
+ for (int i = 0; i < nof_markers; ++i)
+ id_vector.push_back(i);
+ // We make the initialization for MultiMarkerBundle using
+ // MultiMarkerInitializer. Each marker needs to be visible in at least two
+ // images, and at most 64 images are used.
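+ // The two constructor arguments below encode exactly those limits:
+ // the minimum number of sightings per marker (2) and the maximum
+ // number of stored keyframes (64).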
+ multi_marker_init = new MultiMarkerInitializer(id_vector, 2, 64);
+ pose.Reset();
+ multi_marker_init->PointCloudAdd(id_vector[0], marker_size, pose);
+ multi_marker_bundle = new MultiMarkerBundle(id_vector);
+ }

- double error = -1;
- if (!optimize_done) {
- if (marker_detector.Detect(image, cam, true, (visualize == 1), 0.0)) {
- if (visualize == 2)
- error = multi_marker_init->Update(marker_detector.markers, cam, pose, image);
- else
- error = multi_marker_init->Update(marker_detector.markers, cam, pose);
- }
- } else {
- if (marker_detector.Detect(image, cam, true, (visualize == 1), 0.0)) {
- if (visualize == 2)
- error = multi_marker_bundle->Update(marker_detector.markers, cam, pose, image);
- else
- error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
- if ((multi_marker_bundle->SetTrackMarkers(marker_detector, cam, pose, image) > 0) &&
- (marker_detector.DetectAdditional(image, cam, (visualize == 3)) > 0))
- {
- if (visualize == 3)
- error = multi_marker_bundle->Update(marker_detector.markers, cam, pose, image);
- else
- error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
- }
- }
- }
+ double error = -1;
+ if (!optimize_done)
+ {
+ if (marker_detector.Detect(image, cam, true, (visualize == 1), 0.0))
+ {
+ if (visualize == 2)
+ error = multi_marker_init->Update(marker_detector.markers, cam, pose,
+ image);
+ else
+ error = multi_marker_init->Update(marker_detector.markers, cam, pose);
+ }
+ }
+ else
+ {
+ if (marker_detector.Detect(image, cam, true, (visualize == 1), 0.0))
+ {
+ if (visualize == 2)
+ error = multi_marker_bundle->Update(marker_detector.markers, cam, pose,
+ image);
+ else
+ error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
+ if ((multi_marker_bundle->SetTrackMarkers(marker_detector, cam, pose,
+ image) > 0) &&
+ (marker_detector.DetectAdditional(image, cam, (visualize == 3)) > 0))
+ {
+ if (visualize == 3)
+ error = multi_marker_bundle->Update(marker_detector.markers, cam,
+ pose, image);
+ else
+ error =
+ multi_marker_bundle->Update(marker_detector.markers, cam, pose);
+ }
+ }
+ }

- if (add_measurement) {
- if (marker_detector.markers->size() >= 2) {
- cout<<"Adding measurement..."<<endl;
- multi_marker_init->MeasurementsAdd(marker_detector.markers);
- add_measurement=false;
- }
- }
+ if (add_measurement)
+ {
+ if (marker_detector.markers->size() >= 2)
+ {
+ cout << "Adding measurement..." << endl;
+ multi_marker_init->MeasurementsAdd(marker_detector.markers);
+ add_measurement = false;
+ }
+ }

- if (optimize) {
- cout<<"Initializing..."<<endl;
- if (!multi_marker_init->Initialize(cam)) {
- cout<<"Initialization failed, add some more measurements."<<endl;
- } else {
- // Reset the bundle adjuster.
- multi_marker_bundle->Reset();
- multi_marker_bundle->MeasurementsReset();
- // Copy all measurements into the bundle adjuster.
- for (int i = 0; i < multi_marker_init->getMeasurementCount(); ++i) {
- Pose pose;
- multi_marker_init->getMeasurementPose(i, cam, pose);
- const std::vector<MultiMarkerInitializer::MarkerMeasurement> markers
- = multi_marker_init->getMeasurementMarkers(i);
- multi_marker_bundle->MeasurementsAdd(&markers, pose);
- }
- // Initialize the bundle adjuster with initial marker poses.
- multi_marker_bundle->PointCloudCopy(multi_marker_init);
- cout<<"Optimizing..."<<endl;
- if (multi_marker_bundle->Optimize(cam, 0.01, 20)) {
- cout<<"Optimizing done"<<endl;
- optimize_done=true;
- } else {
- cout<<"Optimizing FAILED!"<<endl;
- }
- }
- optimize=false;
- }
- return error;
-}
+ if (optimize)
+ {
+ cout << "Initializing..." << endl;
+ if (!multi_marker_init->Initialize(cam))
+ {
+ cout << "Initialization failed, add some more measurements." << endl;
+ }
+ else
+ {
+ // Reset the bundle adjuster.
+ multi_marker_bundle->Reset();
+ multi_marker_bundle->MeasurementsReset();
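+ // Each stored keyframe contributes one camera pose plus the marker
+ // observations made in it; together these form the measurement set
+ // that the bundle adjustment below refines.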
+ // Copy all measurements into the bundle adjuster.
+ for (int i = 0; i < multi_marker_init->getMeasurementCount(); ++i)
+ {
+ Pose pose;
+ multi_marker_init->getMeasurementPose(i, cam, pose);
+ const std::vector<MultiMarkerInitializer::MarkerMeasurement> markers =
+ multi_marker_init->getMeasurementMarkers(i);
+ multi_marker_bundle->MeasurementsAdd(&markers, pose);
+ }
+ // Initialize the bundle adjuster with initial marker poses.
+ multi_marker_bundle->PointCloudCopy(multi_marker_init);
+ cout << "Optimizing..." << endl;
+ if (multi_marker_bundle->Optimize(cam, 0.01, 20))
+ {
+ cout << "Optimizing done" << endl;
+ optimize_done = true;
+ }
+ else
+ {
+ cout << "Optimizing FAILED!" << endl;
+ }
+ }
+ optimize = false;
+ }
+ return error;
}

-void videocallback(IplImage *image)
+void videocallback(IplImage* image)
{
- static Camera cam;
- static Pose pose;
- bool flip_image = (image->origin?true:false);
- if (flip_image) {
- cvFlip(image);
- image->origin = !image->origin;
- }
-
- static bool init = true;
- if (init)
- {
- init = false;
-
- // Initialize camera
- cout<<"Loading calibration: "<<calibrationFilename.str();
- if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height))
- {
- cout<<" [Ok]"<<endl;
- }
- else
- {
- cam.SetRes(image->width, image->height);
- cout<<" [Fail]"<<endl;
- }
- marker_detector.SetMarkerSize(marker_size);
- }
-
- // Manual approach (use 'p' for taking keyframes and 'o' for optimizing)
- double error = GetMultiMarkerPose(image, &cam, pose);
-
- // Visualize a blue marker at the origo.
- static Marker foo;
- foo.SetMarkerSize(marker_size);
- if ((error >= 0) && (error < 5))
- {
- foo.pose = pose;
- }
- foo.Visualize(image, &cam, CV_RGB(0,0,255));
-
- if (flip_image) {
- cvFlip(image);
- image->origin = !image->origin;
- }
-}
+ static Camera cam;
+ static Pose pose;
+ bool flip_image = (image->origin ? true : false);
+ if (flip_image)
+ {
+ cvFlip(image);
+ image->origin = !image->origin;
+ }
+
+ static bool init = true;
+ if (init)
+ {
+ init = false;
+
+ // Initialize camera
+ cout << "Loading calibration: " << calibrationFilename.str();
+ if (cam.SetCalib(calibrationFilename.str().c_str(), image->width,
+ image->height))
+ {
+ cout << " [Ok]" << endl;
+ }
+ else
+ {
+ cam.SetRes(image->width, image->height);
+ cout << " [Fail]" << endl;
+ }
+ marker_detector.SetMarkerSize(marker_size);
+ }
+
+ // Manual approach (use 'p' for taking keyframes and 'o' for optimizing)
+ double error = GetMultiMarkerPose(image, &cam, pose);
+
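+ // GetMultiMarkerPose returns a negative error when nothing was detected;
+ // the origin marker below only follows the estimated pose while the
+ // error stays under the acceptance limit of 5.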
+  static Marker foo;
+  foo.SetMarkerSize(marker_size);
+  if ((error >= 0) && (error < 5))
+  {
+    foo.pose = pose;
+  }
+  foo.Visualize(image, &cam, CV_RGB(0, 0, 255));
+
+  if (flip_image)
+  {
+    cvFlip(image);
+    image->origin = !image->origin;
+  }
 }
 
 int keycallback(int key)
 {
-    static bool fixed = false;
+  static bool fixed = false;
 
-    if(key == 'r')
+  if (key == 'r')
+  {
+    cout << "Resetting multi marker" << endl;
+    multi_marker_init->Reset();
+    multi_marker_init->MeasurementsReset();
+    multi_marker_bundle->Reset();
+    multi_marker_bundle->MeasurementsReset();
+    add_measurement = false;
+    optimize = false;
+    optimize_done = false;
+  }
+  else if (key == 'v')
+  {
+    visualize = (visualize + 1) % 3;
+  }
+  else if (key == 'l')
+  {
+    if (multi_marker_bundle->Load("mmarker.xml", FILE_FORMAT_XML))
     {
-        cout<<"Reseting multi marker"<<endl;
-        multi_marker_init->Reset();
-        multi_marker_init->MeasurementsReset();
-        multi_marker_bundle->Reset();
-        multi_marker_bundle->MeasurementsReset();
-        add_measurement = false;
-        optimize = false;
-        optimize_done = false;
+      cout << "Multi marker loaded" << endl;
+      multi_marker_init->PointCloudCopy(multi_marker_bundle);
+      optimize_done = true;
     }
-    else if(key == 'v')
+    else
+      cout << "Cannot load multi marker" << endl;
+  }
+  else if (key == 's')
+  {
+    if (multi_marker_bundle->Save("mmarker.xml", FILE_FORMAT_XML))
+      cout << "Multi marker saved" << endl;
+    else
+      cout << "Cannot save multi marker" << endl;
+  }
+  else if (key == 'p')
+  {
+    add_measurement = true;
+  }
+  else if (key == 'o')
+  {
+    optimize = true;
+  }
+  else
+    return key;
+
+  return 0;
+}
+
+int main(int argc, char* argv[])
+{
+  try
+  {
+    // Output usage message
+    std::string filename(argv[0]);
+    filename = filename.substr(filename.find_last_of('\\') + 1);
+    std::cout << "SampleMultiMarkerBundle" << std::endl;
+    std::cout << "=======================" << std::endl;
+    std::cout << std::endl;
+    std::cout << "Description:" << std::endl;
+    std::cout << "  This is an example of how to use the 'MultiMarkerBundle' "
+                 "class to"
+              << std::endl;
+    std::cout << "  automatically deduce and optimize 'MultiMarker' setups."
+              << std::endl;
+    std::cout << std::endl;
+    std::cout << "Usage:" << std::endl;
+    std::cout << "  " << filename << " [device]" << std::endl;
+    std::cout << std::endl;
+    std::cout << "    device    integer selecting device from enumeration "
+                 "list (default 0)"
+              << std::endl;
+    std::cout << "              highgui capture devices are preferred"
+              << std::endl;
+    std::cout << std::endl;
+    std::cout << "Keyboard Shortcuts:" << std::endl;
+    std::cout << "  l: load marker configuration from mmarker.xml" << std::endl;
+    std::cout << "  s: save marker configuration to mmarker.xml" << std::endl;
+    std::cout << "  r: reset marker configuration" << std::endl;
+    std::cout << "  p: add measurement" << std::endl;
+    std::cout << "  o: optimize bundle" << std::endl;
+    std::cout << "  q: quit" << std::endl;
+    std::cout << std::endl;
+
+    // Initialise CvTestbed
+    CvTestbed::Instance().SetVideoCallback(videocallback);
+    CvTestbed::Instance().SetKeyCallback(keycallback);
+
+    // Enumerate possible capture plugins
+    CaptureFactory::CapturePluginVector plugins =
+        CaptureFactory::instance()->enumeratePlugins();
+    if (plugins.size() < 1)
     {
-        visualize = (visualize+1)%3;
+      std::cout << "Could not find any capture plugins."
<< std::endl; + return 0; } - else if(key == 'l') + + // Display capture plugins + std::cout << "Available Plugins: "; + outputEnumeratedPlugins(plugins); + std::cout << std::endl; + + // Enumerate possible capture devices + CaptureFactory::CaptureDeviceVector devices = + CaptureFactory::instance()->enumerateDevices(); + if (devices.size() < 1) { - if(multi_marker_bundle->Load("mmarker.xml", FILE_FORMAT_XML)) - { - cout<<"Multi marker loaded"<PointCloudCopy(multi_marker_bundle); - optimize_done = true; - } - else - cout<<"Cannot load multi marker"< 1) { - if(multi_marker_bundle->Save("mmarker.xml", FILE_FORMAT_XML)) - cout<<"Multi marker saved"<= (int)devices.size()) { - add_measurement=true; + selectedDevice = defaultDevice(devices); } - else if(key == 'o') + + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; + + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); + + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) { - optimize=true; - } - else return key; + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - return 0; -} + cap->start(); + cap->setResolution(640, 480); -int main(int argc, char *argv[]) -{ - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SampleMultiMarkerBundle" << std::endl; - std::cout << "=======================" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This is an example of how to use the 'MultiMarkerBundle' class to" << std::endl; - std::cout << " automatically deduce and optimize 'MultiMarker' setups." << std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " [device]" << std::endl; - std::cout << std::endl; - std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl; - std::cout << " highgui capture devices are prefered" << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " l: load marker configuration from mmarker.txt" << std::endl; - std::cout << " s: save marker configuration to mmarker.txt" << std::endl; - std::cout << " r: reset marker configuration" << std::endl; - std::cout << " p: add measurement" << std::endl; - std::cout << " o: optimize bundle" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; - - // Initialise CvTestbed - CvTestbed::Instance().SetVideoCallback(videocallback); - CvTestbed::Instance().SetKeyCallback(keycallback); - - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." 
<< std::endl; - return 0; - } - - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; - - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." << std::endl; - return 0; - } - - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); - - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } - - std::stringstream title; - title << "SampleMultiMarkerBundle (" << cap->captureDevice().captureType() << ")"; - - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } - - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." << std::endl; - } - - return 0; + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SampleMultiMarkerBundle (" << cap->captureDevice().captureType() + << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) 
+  {
+    std::cout << "Exception: unknown" << std::endl;
+  }
 }
diff --git a/ar_track_alvar/src/SampleOptimization.cpp b/ar_track_alvar/src/SampleOptimization.cpp
index 6e34300..4099784 100644
--- a/ar_track_alvar/src/SampleOptimization.cpp
+++ b/ar_track_alvar/src/SampleOptimization.cpp
@@ -8,136 +8,163 @@
 using namespace std;
 using namespace alvar;
 
-const int res=640;
-const double poly_res=8.0;
+const int res = 640;
+const double poly_res = 8.0;
 
-double random(int dist_type, double param1, double param2) {
-    static CvRNG rng=0;
-    if (rng == 0) rng = cvRNG(time(0));
-    double m_data;
-    CvMat m = cvMat(1, 1, CV_64F, &m_data);
-    cvRandArr(&rng, &m, dist_type, cvScalar(param1), cvScalar(param2));
-    return m_data;
+double random(int dist_type, double param1, double param2)
+{
+  static CvRNG rng = 0;
+  if (rng == 0)
+    rng = cvRNG(time(0));
+  double m_data;
+  CvMat m = cvMat(1, 1, CV_64F, &m_data);
+  cvRandArr(&rng, &m, dist_type, cvScalar(param1), cvScalar(param2));
+  return m_data;
 }
 
-double get_y(double x, double a, double b, double c, double d, double e) {
-    return (a*x*x*x*x + b*x*x*x + c*x*x + d*x + e);
+double get_y(double x, double a, double b, double c, double d, double e)
+{
+  return (a * x * x * x * x + b * x * x * x + c * x * x + d * x + e);
 }
 
 // Just generate some random data that can be used as sensor input
-bool get_measurement(double *x, double *y, double a, double b, double c, double d, double e) {
-    double xx = random(CV_RAND_UNI, -(poly_res/2), +(poly_res/2)); //(rand()*poly_res/RAND_MAX)-(poly_res/2);
-    double yy = get_y(xx, a, b, c, d, e);
-    double ry = random(CV_RAND_NORMAL, 0, poly_res/8); //(rand()*(poly_res/4)/RAND_MAX)-(poly_res/8);
-    yy += ry;
-    *x = xx;
-    *y = yy;
-    if (*y < -(poly_res/2)) return false;
-    if (*y >= (poly_res/2)) return false;
-    return true;
+bool get_measurement(double* x, double* y, double a, double b, double c,
+                     double d, double e)
+{
+  double xx =
+      random(CV_RAND_UNI, -(poly_res / 2),
+             +(poly_res / 2));  //(rand()*poly_res/RAND_MAX)-(poly_res/2);
+  double yy = get_y(xx, a, b, c, d, e);
+  double ry =
+      random(CV_RAND_NORMAL, 0,
+             poly_res / 8);  //(rand()*(poly_res/4)/RAND_MAX)-(poly_res/8);
+  yy += ry;
+  *x = xx;
+  *y = yy;
+  if (*y < -(poly_res / 2))
+    return false;
+  if (*y >= (poly_res / 2))
+    return false;
+  return true;
 }
 
-void Estimate(CvMat* state, CvMat *projection, void *param) {
-    double *measx=(double *)param;
-    int data_degree = state->rows-1;
-    double a = (data_degree >= 4? cvmGet(state, 4, 0) : 0);
-    double b = (data_degree >= 3? cvmGet(state, 3, 0) : 0);
-    double c = (data_degree >= 2? cvmGet(state, 2, 0) : 0);
-    double d = (data_degree >= 1? cvmGet(state, 1, 0) : 0);
-    double e = (data_degree >= 0? cvmGet(state, 0, 0) : 0);
-    for (int i=0; i<projection->rows; i++) {
-        cvmSet(projection, i, 0, get_y(measx[i], a, b, c, d, e));
-    }
+void Estimate(CvMat* state, CvMat* projection, void* param)
+{
+  double* measx = (double*)param;
+  int data_degree = state->rows - 1;
+  double a = (data_degree >= 4 ? cvmGet(state, 4, 0) : 0);
+  double b = (data_degree >= 3 ? cvmGet(state, 3, 0) : 0);
+  double c = (data_degree >= 2 ? cvmGet(state, 2, 0) : 0);
+  double d = (data_degree >= 1 ? cvmGet(state, 1, 0) : 0);
+  double e = (data_degree >= 0 ?
cvmGet(state, 0, 0) : 0); + for (int i = 0; i < projection->rows; i++) + { + cvmSet(projection, i, 0, get_y(measx[i], a, b, c, d, e)); + } } -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SampleOptimization" << std::endl; - std::cout << "==================" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This is an example of how to use the 'Optimization' class. Random data" << std::endl; - std::cout << " is generated and approximated using curves of increasing degrees." << std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " any key: cycle through datasets" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SampleOptimization" << std::endl; + std::cout << "==================" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This is an example of how to use the 'Optimization' class. " + "Random data" + << std::endl; + std::cout << " is generated and approximated using curves of increasing " + "degrees." + << std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << std::endl; + std::cout << std::endl; + std::cout << "Keyboard Shortcuts:" << std::endl; + std::cout << " any key: cycle through datasets" << std::endl; + std::cout << " q: quit" << std::endl; + std::cout << std::endl; - // Processing loop - IplImage *img = cvCreateImage(cvSize(res,res), IPL_DEPTH_8U, 3); - cvNamedWindow("SampleOptimization"); - for (int data_degree=0; data_degree<5; data_degree++) { - double a = (data_degree >= 4? random(CV_RAND_UNI, -0.5, 0.5) : 0); - double b = (data_degree >= 3? random(CV_RAND_UNI, -0.5, 0.5) : 0); - double c = (data_degree >= 2? random(CV_RAND_UNI, -0.5, 0.5) : 0); - double d = (data_degree >= 1? random(CV_RAND_UNI, -0.5, 0.5) : 0); - double e = (data_degree >= 0? random(CV_RAND_UNI, -0.5, 0.5) : 0); - cvZero(img); - vector measvec; - for (int i=0; i<1000; i++) { - double x, y; - if (get_measurement(&x, &y, a, b, c, d, e)) { - measvec.push_back(cvPoint2D32f(x, y)); - x = (x*res/poly_res)+(res/2); - y = (y*res/poly_res)+(res/2); - cvCircle(img, cvPoint(int(x), int(y)), 1, CV_RGB(0,255,0)); - } - } - cvShowImage("SampleOptimization", img); - cvWaitKey(10); - double measx[1000]; - CvMat *meas = cvCreateMat(measvec.size(), 1, CV_64F); - for (size_t i=0; irows); - opt.Optimize(¶m, meas, 0.1, 100, Estimate, measx); - double a = (degree >= 4? cvmGet(¶m, 4, 0) : 0); - double b = (degree >= 3? cvmGet(¶m, 3, 0) : 0); - double c = (degree >= 2? cvmGet(¶m, 2, 0) : 0); - double d = (degree >= 1? cvmGet(¶m, 1, 0) : 0); - double e = (degree >= 0? cvmGet(¶m, 0, 0) : 0); - const int step=5; - for (int x2=step; x2= 4 ? random(CV_RAND_UNI, -0.5, 0.5) : 0); + double b = (data_degree >= 3 ? random(CV_RAND_UNI, -0.5, 0.5) : 0); + double c = (data_degree >= 2 ? random(CV_RAND_UNI, -0.5, 0.5) : 0); + double d = (data_degree >= 1 ? random(CV_RAND_UNI, -0.5, 0.5) : 0); + double e = (data_degree >= 0 ? 
random(CV_RAND_UNI, -0.5, 0.5) : 0); + cvZero(img); + vector measvec; + for (int i = 0; i < 1000; i++) + { + double x, y; + if (get_measurement(&x, &y, a, b, c, d, e)) + { + measvec.push_back(cvPoint2D32f(x, y)); + x = (x * res / poly_res) + (res / 2); + y = (y * res / poly_res) + (res / 2); + cvCircle(img, cvPoint(int(x), int(y)), 1, CV_RGB(0, 255, 0)); } - cvReleaseImage(&img); - return 0; - } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; - } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + } + cvShowImage("SampleOptimization", img); + cvWaitKey(10); + double measx[1000]; + CvMat* meas = cvCreateMat(measvec.size(), 1, CV_64F); + for (size_t i = 0; i < measvec.size(); i++) + { + measx[i] = measvec[i].x; + cvmSet(meas, i, 0, measvec[i].y); + } + for (int degree = 0; degree < 5; degree++) + { + double param_data[5] = { 0 }; + CvMat param = cvMat(degree + 1, 1, CV_64F, param_data); + Optimization opt(param.rows, meas->rows); + opt.Optimize(¶m, meas, 0.1, 100, Estimate, measx); + double a = (degree >= 4 ? cvmGet(¶m, 4, 0) : 0); + double b = (degree >= 3 ? cvmGet(¶m, 3, 0) : 0); + double c = (degree >= 2 ? cvmGet(¶m, 2, 0) : 0); + double d = (degree >= 1 ? cvmGet(¶m, 1, 0) : 0); + double e = (degree >= 0 ? cvmGet(¶m, 0, 0) : 0); + const int step = 5; + for (int x2 = step; x2 < res; x2 += step) + { + int x1 = x2 - step; + double xx1 = (x1 * poly_res / res) - (poly_res / 2); + double xx2 = (x2 * poly_res / res) - (poly_res / 2); + double yy1 = get_y(xx1, a, b, c, d, e); + double yy2 = get_y(xx2, a, b, c, d, e); + int y1 = int((yy1 * res / poly_res) + (res / 2)); + int y2 = int((yy2 * res / poly_res) + (res / 2)); + cvLine(img, cvPoint(x1, y1), cvPoint(x2, y2), + CV_RGB(degree * 50, 255 - (degree * 50), 255)); + } + cvShowImage("SampleOptimization", img); + cvWaitKey(10); + } + cvReleaseMat(&meas); + cvShowImage("SampleOptimization", img); + int key = cvWaitKey(0); + if (key == 'q') + { + break; + } } + cvReleaseImage(&img); + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) 
+ { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SamplePointcloud.cpp b/ar_track_alvar/src/SamplePointcloud.cpp index b971b66..f16ee6c 100644 --- a/ar_track_alvar/src/SamplePointcloud.cpp +++ b/ar_track_alvar/src/SamplePointcloud.cpp @@ -5,288 +5,351 @@ using namespace alvar; using namespace std; -const double marker_size=1; -bool init=true; -SimpleSfM *sfm; +const double marker_size = 1; +bool init = true; +SimpleSfM* sfm; std::stringstream calibrationFilename; // Own drawable 3D cross on features -struct OwnDrawable : public Drawable { - virtual void Draw() { - const double scale = 0.2; - glPushMatrix(); - glMultMatrixd(gl_mat); - glColor3d(color[0], color[1], color[2]); - glBegin(GL_LINES); - glVertex3f(0.0, 0.0, -scale); - glVertex3f(0.0, 0.0, scale); - glVertex3f(0.0, -scale, 0.0); - glVertex3f(0.0, scale, 0.0); - glVertex3f(-scale, 0.0, 0.0); - glVertex3f(scale, 0.0, 0.0); - glEnd(); - glPopMatrix(); - } +struct OwnDrawable : public Drawable +{ + virtual void Draw() + { + const double scale = 0.2; + glPushMatrix(); + glMultMatrixd(gl_mat); + glColor3d(color[0], color[1], color[2]); + glBegin(GL_LINES); + glVertex3f(0.0, 0.0, -scale); + glVertex3f(0.0, 0.0, scale); + glVertex3f(0.0, -scale, 0.0); + glVertex3f(0.0, scale, 0.0); + glVertex3f(-scale, 0.0, 0.0); + glVertex3f(scale, 0.0, 0.0); + glEnd(); + glPopMatrix(); + } }; Drawable d_marker; OwnDrawable d_points[1000]; int own_drawable_count; -bool reset=false; -void videocallback(IplImage *image) +bool reset = false; +void videocallback(IplImage* image) { - static IplImage *rgb = 0; - static IplImage* bg_image = 0; + static IplImage* rgb = 0; + static IplImage* bg_image = 0; - bool flip_image = (image->origin?true:false); - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; - } + bool flip_image = (image->origin ? 
true : false); + if (flip_image) + { + cvFlip(image); + image->origin = !image->origin; + } - if (init) { - init = false; - sfm->Clear(); - cout<<"Loading calibration: "<GetCamera()->SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) { - cout<<" [Ok]"<GetCamera()->SetRes(image->width, image->height); - cout<<" [Fail]"<GetCamera()->GetOpenglProjectionMatrix(p,image->width,image->height); - GlutViewer::SetGlProjectionMatrix(p); - d_marker.SetScale(marker_size*2); - rgb = CvTestbed::Instance().CreateImageWithProto("RGB", image, 0, 3); - CvTestbed::Instance().ToggleImageVisible(0, 0); - bg_image = CvTestbed::Instance().CreateImage("BG texture", cvSize(512,512),8, 3); - bg_image->origin = 0; + if (init) + { + init = false; + sfm->Clear(); + cout << "Loading calibration: " << calibrationFilename.str(); + if (sfm->GetCamera()->SetCalib(calibrationFilename.str().c_str(), + image->width, image->height)) + { + cout << " [Ok]" << endl; + } + else + { + sfm->GetCamera()->SetRes(image->width, image->height); + cout << " [Fail]" << endl; + } + double p[16]; + sfm->GetCamera()->GetOpenglProjectionMatrix(p, image->width, image->height); + GlutViewer::SetGlProjectionMatrix(p); + d_marker.SetScale(marker_size * 2); + rgb = CvTestbed::Instance().CreateImageWithProto("RGB", image, 0, 3); + CvTestbed::Instance().ToggleImageVisible(0, 0); + bg_image = + CvTestbed::Instance().CreateImage("BG texture", cvSize(512, 512), 8, 3); + bg_image->origin = 0; - sfm->SetScale(10); - if (sfm->AddMultiMarker("mmarker.xml", FILE_FORMAT_XML)) { - std::cout<<"Using MultiMarker defined in mmarker.xml."<AddMarker(0, marker_size*2, pose); - pose.SetTranslation(-marker_size*2.5, +marker_size*1.5, 0); - sfm->AddMarker(1, marker_size, pose); - pose.SetTranslation(+marker_size*2.5, +marker_size*1.5, 0); - sfm->AddMarker(2, marker_size, pose); - pose.SetTranslation(-marker_size*2.5, -marker_size*1.5, 0); - sfm->AddMarker(3, marker_size, pose); - pose.SetTranslation(+marker_size*2.5, -marker_size*1.5, 0); - sfm->AddMarker(4, marker_size, pose); - } - sfm->SetResetPoint(); + sfm->SetScale(10); + if (sfm->AddMultiMarker("mmarker.xml", FILE_FORMAT_XML)) + { + std::cout << "Using MultiMarker defined in mmarker.xml." << std::endl; } - if (reset) { - sfm->Reset(); - reset = false; + else + { + std::cout << "Couldn't load mmarker.xml. Using default " + "'SampleMultiMarker' setup." 
+              << std::endl;
+    Pose pose;
+    pose.Reset();
+    sfm->AddMarker(0, marker_size * 2, pose);
+    pose.SetTranslation(-marker_size * 2.5, +marker_size * 1.5, 0);
+    sfm->AddMarker(1, marker_size, pose);
+    pose.SetTranslation(+marker_size * 2.5, +marker_size * 1.5, 0);
+    sfm->AddMarker(2, marker_size, pose);
+    pose.SetTranslation(-marker_size * 2.5, -marker_size * 1.5, 0);
+    sfm->AddMarker(3, marker_size, pose);
+    pose.SetTranslation(+marker_size * 2.5, -marker_size * 1.5, 0);
+    sfm->AddMarker(4, marker_size, pose);
 }
+    sfm->SetResetPoint();
+  }
+  if (reset)
+  {
+    sfm->Reset();
+    reset = false;
+  }
 
-    //if (sfm->UpdateRotationsOnly(image)) {
-    //if (sfm->UpdateTriangulateOnly(image)) {
-    if (sfm->Update(image, false, true, 7.f, 15.f)) {
-        // Draw the camera (The GlutViewer has little weirdness here...)q
-        Pose pose = *(sfm->GetPose());
-        double gl[16];
-        pose.GetMatrixGL(gl, true);
-        GlutViewer::SetGlModelviewMatrix(gl);
-        pose.Invert();
-        pose.GetMatrixGL(d_marker.gl_mat, false);
-        GlutViewer::DrawableClear();
-        GlutViewer::DrawableAdd(&d_marker);
-        own_drawable_count=0;
+  // if (sfm->UpdateRotationsOnly(image)) {
+  // if (sfm->UpdateTriangulateOnly(image)) {
+  if (sfm->Update(image, false, true, 7.f, 15.f))
+  {
+    // Draw the camera (the GlutViewer has a little weirdness here...)
+    Pose pose = *(sfm->GetPose());
+    double gl[16];
+    pose.GetMatrixGL(gl, true);
+    GlutViewer::SetGlModelviewMatrix(gl);
+    pose.Invert();
+    pose.GetMatrixGL(d_marker.gl_mat, false);
+    GlutViewer::DrawableClear();
+    GlutViewer::DrawableAdd(&d_marker);
+    own_drawable_count = 0;
 
-        // Draw features
-        std::map<int,Feature>::iterator iter;
-        iter = sfm->container.begin();
-        for(;iter != sfm->container.end(); iter++) {
-            if (sfm->container_triangulated.find(iter->first) != sfm->container_triangulated.end()) continue;
-            if (iter->second.has_p3d)
-            {
-                if (own_drawable_count < 1000) {
-                    memset(d_points[own_drawable_count].gl_mat, 0, 16*sizeof(double));
-                    d_points[own_drawable_count].gl_mat[0] = 1;
-                    d_points[own_drawable_count].gl_mat[5] = 1;
-                    d_points[own_drawable_count].gl_mat[10] = 1;
-                    d_points[own_drawable_count].gl_mat[15] = 1;
-                    d_points[own_drawable_count].gl_mat[12] = iter->second.p3d.x;
-                    d_points[own_drawable_count].gl_mat[13] = iter->second.p3d.y;
-                    d_points[own_drawable_count].gl_mat[14] = iter->second.p3d.z;
-                    if (iter->second.type_id == 0) d_points[own_drawable_count].SetColor(1,0,0);
-                    else d_points[own_drawable_count].SetColor(0,1,0);
-                    GlutViewer::DrawableAdd(&(d_points[own_drawable_count]));
-                    own_drawable_count++;
-                }
-            }
-        }
+    // Draw features
+    std::map<int, Feature>::iterator iter;
+    iter = sfm->container.begin();
+    for (; iter != sfm->container.end(); iter++)
+    {
+      if (sfm->container_triangulated.find(iter->first) !=
+          sfm->container_triangulated.end())
+        continue;
+      if (iter->second.has_p3d)
+      {
+        if (own_drawable_count < 1000)
+        {
+          memset(d_points[own_drawable_count].gl_mat, 0, 16 * sizeof(double));
+          d_points[own_drawable_count].gl_mat[0] = 1;
+          d_points[own_drawable_count].gl_mat[5] = 1;
+          d_points[own_drawable_count].gl_mat[10] = 1;
+          d_points[own_drawable_count].gl_mat[15] = 1;
+          d_points[own_drawable_count].gl_mat[12] = iter->second.p3d.x;
+          d_points[own_drawable_count].gl_mat[13] = iter->second.p3d.y;
+          d_points[own_drawable_count].gl_mat[14] = iter->second.p3d.z;
+          if (iter->second.type_id == 0)
+            d_points[own_drawable_count].SetColor(1, 0, 0);
+          else
+            d_points[own_drawable_count].SetColor(0, 1, 0);
+          GlutViewer::DrawableAdd(&(d_points[own_drawable_count]));
+          own_drawable_count++;
+        }
+      }
+    }
 
-        // Draw triangulated features
-
iter = sfm->container_triangulated.begin(); - for(;iter != sfm->container_triangulated.end(); iter++) { - if (iter->second.has_p3d) - { - if (own_drawable_count < 1000) { - memset(d_points[own_drawable_count].gl_mat, 0, 16*sizeof(double)); - d_points[own_drawable_count].gl_mat[0] = 1; - d_points[own_drawable_count].gl_mat[5] = 1; - d_points[own_drawable_count].gl_mat[10] = 1; - d_points[own_drawable_count].gl_mat[15] = 1; - d_points[own_drawable_count].gl_mat[12] = iter->second.p3d.x; - d_points[own_drawable_count].gl_mat[13] = iter->second.p3d.y; - d_points[own_drawable_count].gl_mat[14] = iter->second.p3d.z; - /*if (iter->second.type_id == 0) d_points[own_drawable_count].SetColor(1,0,1); - else*/ d_points[own_drawable_count].SetColor(0,0,1); - GlutViewer::DrawableAdd(&(d_points[own_drawable_count])); - own_drawable_count++; - } - } + // Draw triangulated features + iter = sfm->container_triangulated.begin(); + for (; iter != sfm->container_triangulated.end(); iter++) + { + if (iter->second.has_p3d) + { + if (own_drawable_count < 1000) + { + memset(d_points[own_drawable_count].gl_mat, 0, 16 * sizeof(double)); + d_points[own_drawable_count].gl_mat[0] = 1; + d_points[own_drawable_count].gl_mat[5] = 1; + d_points[own_drawable_count].gl_mat[10] = 1; + d_points[own_drawable_count].gl_mat[15] = 1; + d_points[own_drawable_count].gl_mat[12] = iter->second.p3d.x; + d_points[own_drawable_count].gl_mat[13] = iter->second.p3d.y; + d_points[own_drawable_count].gl_mat[14] = iter->second.p3d.z; + /*if (iter->second.type_id == 0) + d_points[own_drawable_count].SetColor(1,0,1); else*/ + d_points[own_drawable_count].SetColor(0, 0, 1); + GlutViewer::DrawableAdd(&(d_points[own_drawable_count])); + own_drawable_count++; } + } } - if (image->nChannels == 1) cvCvtColor(image, rgb, CV_GRAY2RGB); - else if (image->nChannels == 3) cvCopy(image, rgb); + } + if (image->nChannels == 1) + cvCvtColor(image, rgb, CV_GRAY2RGB); + else if (image->nChannels == 3) + cvCopy(image, rgb); - // Draw video on GlutViewer background - cvResize(rgb, bg_image); - GlutViewer::SetVideo(bg_image); + // Draw video on GlutViewer background + cvResize(rgb, bg_image); + GlutViewer::SetVideo(bg_image); - // Draw debug info to the rgb - sfm->Draw(rgb); + // Draw debug info to the rgb + sfm->Draw(rgb); - if (flip_image) { - cvFlip(image); - image->origin = !image->origin; - } + if (flip_image) + { + cvFlip(image); + image->origin = !image->origin; + } } - int keycallback(int key) { - if(key == 'r') - { - reset = true; - } - else return key; + if (key == 'r') + { + reset = true; + } + else + return key; - return 0; + return 0; } -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { - try { - // Output usage message - std::string filename(argv[0]); - filename = filename.substr(filename.find_last_of('\\') + 1); - std::cout << "SamplePointcloud" << std::endl; - std::cout << "================" << std::endl; - std::cout << std::endl; - std::cout << "Description:" << std::endl; - std::cout << " This example shows simple structure from motion approach that can be "<< std::endl; - std::cout << " used to track environment beyond an multimarker setup. To get this "<< std::endl; - std::cout << " example work properly be sure to calibrate your camera and tune it "<< std::endl; - std::cout << " to have fast framerate without motion blur. 
"<< std::endl; - std::cout << std::endl; - std::cout << " There are two possible approaches Update() and UpdateRotationsOnly()."<< std::endl; - std::cout << " By default the Update() is used but you can easily uncomment the "<< std::endl; - std::cout << " other one if needed."<< std::endl; - std::cout << std::endl; - std::cout << "Usage:" << std::endl; - std::cout << " " << filename << " [device]" << std::endl; - std::cout << std::endl; - std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl; - std::cout << " highgui capture devices are prefered" << std::endl; - std::cout << std::endl; - std::cout << "Keyboard Shortcuts:" << std::endl; - std::cout << " r: reset" << std::endl; - std::cout << " q: quit" << std::endl; - std::cout << std::endl; + try + { + // Output usage message + std::string filename(argv[0]); + filename = filename.substr(filename.find_last_of('\\') + 1); + std::cout << "SamplePointcloud" << std::endl; + std::cout << "================" << std::endl; + std::cout << std::endl; + std::cout << "Description:" << std::endl; + std::cout << " This example shows simple structure from motion approach " + "that can be " + << std::endl; + std::cout << " used to track environment beyond an multimarker setup. To " + "get this " + << std::endl; + std::cout << " example work properly be sure to calibrate your camera and " + "tune it " + << std::endl; + std::cout << " to have fast framerate without motion blur. " + " " + << std::endl; + std::cout << std::endl; + std::cout << " There are two possible approaches Update() and " + "UpdateRotationsOnly()." + << std::endl; + std::cout << " By default the Update() is used but you can easily " + "uncomment the " + << std::endl; + std::cout << " other one if needed." << std::endl; + std::cout << std::endl; + std::cout << "Usage:" << std::endl; + std::cout << " " << filename << " [device]" << std::endl; + std::cout << std::endl; + std::cout << " device integer selecting device from enumeration list " + "(default 0)" + << std::endl; + std::cout << " highgui capture devices are prefered" + << std::endl; + std::cout << std::endl; + std::cout << "Keyboard Shortcuts:" << std::endl; + std::cout << " r: reset" << std::endl; + std::cout << " q: quit" << std::endl; + std::cout << std::endl; - // Initialise GlutViewer and CvTestbed - GlutViewer::Start(argc, argv, 640, 480, 100); - CvTestbed::Instance().SetKeyCallback(keycallback); - CvTestbed::Instance().SetVideoCallback(videocallback); - sfm = new SimpleSfM(); + // Initialise GlutViewer and CvTestbed + GlutViewer::Start(argc, argv, 640, 480, 100); + CvTestbed::Instance().SetKeyCallback(keycallback); + CvTestbed::Instance().SetVideoCallback(videocallback); + sfm = new SimpleSfM(); - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." << std::endl; - return 0; - } + // Enumerate possible capture plugins + CaptureFactory::CapturePluginVector plugins = + CaptureFactory::instance()->enumeratePlugins(); + if (plugins.size() < 1) + { + std::cout << "Could not find any capture plugins." 
<< std::endl; + return 0; + } - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; + // Display capture plugins + std::cout << "Available Plugins: "; + outputEnumeratedPlugins(plugins); + std::cout << std::endl; - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." << std::endl; - return 0; - } + // Enumerate possible capture devices + CaptureFactory::CaptureDeviceVector devices = + CaptureFactory::instance()->enumerateDevices(); + if (devices.size() < 1) + { + std::cout << "Could not find any capture devices." << std::endl; + return 0; + } - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); + // Check command line argument for which device to use + int selectedDevice = defaultDevice(devices); + if (argc > 1) + { + selectedDevice = atoi(argv[1]); + } + if (selectedDevice >= (int)devices.size()) + { + selectedDevice = defaultDevice(devices); + } - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; - std::stringstream title; - title << "SamplePointcloud (" << cap->captureDevice().captureType() << ")"; + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } + cap->start(); + cap->setResolution(640, 480); - cap->stop(); - delete cap; cap = NULL; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." 
<< std::endl; - } - delete sfm; sfm = NULL; + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } + + std::stringstream title; + title << "SamplePointcloud (" << cap->captureDevice().captureType() + << ")"; + + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - return 0; + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } + + cap->stop(); + delete cap; + cap = NULL; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + delete sfm; + sfm = NULL; + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) + { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SampleTrack.cpp b/ar_track_alvar/src/SampleTrack.cpp index e85f03b..f5b1db8 100644 --- a/ar_track_alvar/src/SampleTrack.cpp +++ b/ar_track_alvar/src/SampleTrack.cpp @@ -12,253 +12,304 @@ bool reset = true; CvFont font; std::stringstream calibrationFilename; -void track_none(IplImage *image, IplImage *img_gray) { +void track_none(IplImage* image, IplImage* img_gray) +{ } -void track_psa(IplImage *image, IplImage *img_gray) { - static TrackerPsa tracker_psa; - static double x, y; - if (reset) { - reset = false; - x = img_gray->width / 2; - y = img_gray->height / 2; - tracker_psa.Track(img_gray); // To reset tracker call it twice - } - tracker_psa.Track(img_gray); - tracker_psa.Compensate(&x, &y); - cvCircle(image, cvPoint(int(x), int(y)), 10, CV_RGB(255,0,0)); +void track_psa(IplImage* image, IplImage* img_gray) +{ + static TrackerPsa tracker_psa; + static double x, y; + if (reset) + { + reset = false; + x = img_gray->width / 2; + y = img_gray->height / 2; + tracker_psa.Track(img_gray); // To reset tracker call it twice + } + tracker_psa.Track(img_gray); + tracker_psa.Compensate(&x, &y); + cvCircle(image, cvPoint(int(x), int(y)), 10, CV_RGB(255, 0, 0)); } -void track_psa_rot(IplImage *image, IplImage *img_gray) { - static TrackerPsaRot tracker_psa_rot; - static double x, y, r; - if (reset) { - reset = false; - x = img_gray->width / 2; - y = img_gray->height / 2; - r = 0; - tracker_psa_rot.Track(img_gray); // To reset tracker call it twice - } - tracker_psa_rot.Track(img_gray); - tracker_psa_rot.Compensate(&x, &y); - r += tracker_psa_rot.rotd; - cvCircle(image, cvPoint(int(x), int(y)), 15, CV_RGB(255,0,0)); - double r_rad = r*3.1415926535/180; - cvLine(image, cvPoint(int(x), int(y)), cvPoint(int(x-sin(r_rad)*15), int(y+cos(r_rad)*15)), CV_RGB(255,0,0)); +void track_psa_rot(IplImage* image, IplImage* img_gray) +{ + static TrackerPsaRot tracker_psa_rot; + static double x, y, r; + if (reset) + { + reset = false; + x = img_gray->width / 2; + y = img_gray->height / 2; + r = 0; + tracker_psa_rot.Track(img_gray); // To reset tracker call it twice + } + tracker_psa_rot.Track(img_gray); + tracker_psa_rot.Compensate(&x, &y); + r += tracker_psa_rot.rotd; + cvCircle(image, cvPoint(int(x), int(y)), 15, CV_RGB(255, 0, 0)); + double r_rad = r * 3.1415926535 / 180; + cvLine(image, cvPoint(int(x), int(y)), + cvPoint(int(x - sin(r_rad) * 15), int(y + cos(r_rad) * 15)), + CV_RGB(255, 0, 0)); } -void track_stat(IplImage *image, 
IplImage *img_gray) { - static TrackerStat tracker_stat; - static double x, y; - if (reset) { - reset = false; - x = img_gray->width / 2; - y = img_gray->height / 2; - tracker_stat.Track(img_gray); // To reset tracker call it twice - } - tracker_stat.Track(img_gray); - tracker_stat.Compensate(&x, &y); - cvCircle(image, cvPoint(int(x), int(y)), 10, CV_RGB(0,255,0)); +void track_stat(IplImage* image, IplImage* img_gray) +{ + static TrackerStat tracker_stat; + static double x, y; + if (reset) + { + reset = false; + x = img_gray->width / 2; + y = img_gray->height / 2; + tracker_stat.Track(img_gray); // To reset tracker call it twice + } + tracker_stat.Track(img_gray); + tracker_stat.Compensate(&x, &y); + cvCircle(image, cvPoint(int(x), int(y)), 10, CV_RGB(0, 255, 0)); } -void track_stat_rot(IplImage *image, IplImage *img_gray) { - static TrackerStatRot tracker_stat_rot; - static double x, y, r; - if (reset) { - reset = false; - x = img_gray->width / 2; - y = img_gray->height / 2; - r = 0; - tracker_stat_rot.Track(img_gray); // To reset tracker call it twice - } - tracker_stat_rot.Track(img_gray); - tracker_stat_rot.Compensate(&x, &y); - r += tracker_stat_rot.rotd; - cvCircle(image, cvPoint(int(x), int(y)), 15, CV_RGB(0,255,0)); - double r_rad = r*3.1415926535/180; - cvLine(image, cvPoint(int(x), int(y)), cvPoint(int(x-sin(r_rad)*15), int(y+cos(r_rad)*15)), CV_RGB(0,255,0)); +void track_stat_rot(IplImage* image, IplImage* img_gray) +{ + static TrackerStatRot tracker_stat_rot; + static double x, y, r; + if (reset) + { + reset = false; + x = img_gray->width / 2; + y = img_gray->height / 2; + r = 0; + tracker_stat_rot.Track(img_gray); // To reset tracker call it twice + } + tracker_stat_rot.Track(img_gray); + tracker_stat_rot.Compensate(&x, &y); + r += tracker_stat_rot.rotd; + cvCircle(image, cvPoint(int(x), int(y)), 15, CV_RGB(0, 255, 0)); + double r_rad = r * 3.1415926535 / 180; + cvLine(image, cvPoint(int(x), int(y)), + cvPoint(int(x - sin(r_rad) * 15), int(y + cos(r_rad) * 15)), + CV_RGB(0, 255, 0)); } -void track_features(IplImage *image, IplImage *img_gray) { - static TrackerFeatures tracker_features(200, 190, 0.01, 0, 4, 6); - if (reset) { - reset = false; - tracker_features.Reset(); - } - tracker_features.Purge(); - tracker_features.Track(img_gray); - for (int i=0; iwidth, image->height)) { - cout<<" [Ok]"<width, image->height); - cout<<" [Fail]"<width, + image->height)) + { + cout << " [Ok]" << endl; } - if (image->nChannels == 1) cvCopy(image, img_gray); - else cvCvtColor(image, img_gray, CV_RGB2GRAY); + else + { + cam.SetRes(image->width, image->height); + cout << " [Fail]" << endl; + } + } + if (image->nChannels == 1) + cvCopy(image, img_gray); + else + cvCvtColor(image, img_gray, CV_RGB2GRAY); - trackers[tracker](image, img_gray); - cvPutText(image, tracker_names[tracker], cvPoint(3, image->height - 20), &font, CV_RGB(255, 255, 255)); + trackers[tracker](image, img_gray); + cvPutText(image, tracker_names[tracker], cvPoint(3, image->height - 20), + &font, CV_RGB(255, 255, 255)); } -int keycallback(int key) { - if ((key == 'r') || (key == 't')) { - reset = true; - return 0; - } - else if ((key == 'n') || (key == ' ')){ - tracker = ((tracker+1)%nof_trackers); - reset = true; - return 0; - } - return key; +int keycallback(int key) +{ + if ((key == 'r') || (key == 't')) + { + reset = true; + return 0; + } + else if ((key == 'n') || (key == ' ')) + { + tracker = ((tracker + 1) % nof_trackers); + reset = true; + return 0; + } + return key; } -int main(int argc, char *argv[]) +int main(int 
argc, char* argv[])
 {
-    try {
-        // Output usage message
-        std::string filename(argv[0]);
-        filename = filename.substr(filename.find_last_of('\\') + 1);
-        std::cout << "SampleTrack" << std::endl;
-        std::cout << "===========" << std::endl;
-        std::cout << std::endl;
-        std::cout << "Description:" << std::endl;
-        std::cout << "  This is an example of how to use the 'TrackerPsa', 'TrackerPsaRot'," << std::endl;
-        std::cout << "  'TrackerFeatures', 'TrackerStat' and 'TrackerStatRot' classes to" << std::endl;
-        std::cout << "  track the optical flow of the video." << std::endl;
-        std::cout << std::endl;
-        std::cout << "Usage:" << std::endl;
-        std::cout << "  " << filename << " [device]" << std::endl;
-        std::cout << std::endl;
-        std::cout << "    device    integer selecting device from enumeration list (default 0)" << std::endl;
-        std::cout << "              highgui capture devices are prefered" << std::endl;
-        std::cout << std::endl;
-        std::cout << "Keyboard Shortcuts:" << std::endl;
-        std::cout << "  r,t: reset tracker" << std::endl;
-        std::cout << "  n,space: cycle through tracking algorithms" << std::endl;
-        std::cout << "  q: quit" << std::endl;
-        std::cout << std::endl;
+  try
+  {
+    // Output usage message
+    std::string filename(argv[0]);
+    filename = filename.substr(filename.find_last_of('\\') + 1);
+    std::cout << "SampleTrack" << std::endl;
+    std::cout << "===========" << std::endl;
+    std::cout << std::endl;
+    std::cout << "Description:" << std::endl;
+    std::cout << "  This is an example of how to use the 'TrackerPsa', "
                 "'TrackerPsaRot',"
+              << std::endl;
+    std::cout << "  'TrackerFeatures', 'TrackerStat' and 'TrackerStatRot' "
                 "classes to"
+              << std::endl;
+    std::cout << "  track the optical flow of the video." << std::endl;
+    std::cout << std::endl;
+    std::cout << "Usage:" << std::endl;
+    std::cout << "  " << filename << " [device]" << std::endl;
+    std::cout << std::endl;
+    std::cout << "    device    integer selecting device from enumeration "
                 "list (default 0)"
+              << std::endl;
+    std::cout << "              highgui capture devices are preferred"
+              << std::endl;
+    std::cout << std::endl;
+    std::cout << "Keyboard Shortcuts:" << std::endl;
+    std::cout << "  r,t: reset tracker" << std::endl;
+    std::cout << "  n,space: cycle through tracking algorithms" << std::endl;
+    std::cout << "  q: quit" << std::endl;
+    std::cout << std::endl;
+
+    // Initialize font
+    cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0);
+
+    // Initialise CvTestbed
+    CvTestbed::Instance().SetVideoCallback(videocallback);
+    CvTestbed::Instance().SetKeyCallback(keycallback);
-        // Initialize font
-        cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0);
+    // Enumerate possible capture plugins
+    CaptureFactory::CapturePluginVector plugins =
+        CaptureFactory::instance()->enumeratePlugins();
+    if (plugins.size() < 1)
+    {
+      std::cout << "Could not find any capture plugins." << std::endl;
+      return 0;
+    }
+
+    // Display capture plugins
+    std::cout << "Available Plugins: ";
+    outputEnumeratedPlugins(plugins);
+    std::cout << std::endl;
-        // Initialise CvTestbed
-        CvTestbed::Instance().SetVideoCallback(videocallback);
-        CvTestbed::Instance().SetKeyCallback(keycallback);
+    // Enumerate possible capture devices
+    CaptureFactory::CaptureDeviceVector devices =
+        CaptureFactory::instance()->enumerateDevices();
+    if (devices.size() < 1)
+    {
+      std::cout << "Could not find any capture devices."
<< std::endl; + return 0; + } - // Enumerate possible capture plugins - CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins(); - if (plugins.size() < 1) { - std::cout << "Could not find any capture plugins." << std::endl; - return 0; - } + // Check command line argument for which device to use + int selectedDevice = defaultDevice(devices); + if (argc > 1) + { + selectedDevice = atoi(argv[1]); + } + if (selectedDevice >= (int)devices.size()) + { + selectedDevice = defaultDevice(devices); + } - // Display capture plugins - std::cout << "Available Plugins: "; - outputEnumeratedPlugins(plugins); - std::cout << std::endl; + // Display capture devices + std::cout << "Enumerated Capture Devices:" << std::endl; + outputEnumeratedDevices(devices, selectedDevice); + std::cout << std::endl; - // Enumerate possible capture devices - CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices(); - if (devices.size() < 1) { - std::cout << "Could not find any capture devices." << std::endl; - return 0; - } + // Create capture object from camera + Capture* cap = + CaptureFactory::instance()->createCapture(devices[selectedDevice]); + std::string uniqueName = devices[selectedDevice].uniqueName(); - // Check command line argument for which device to use - int selectedDevice = defaultDevice(devices); - if (argc > 1) { - selectedDevice = atoi(argv[1]); - } - if (selectedDevice >= (int)devices.size()) { - selectedDevice = defaultDevice(devices); - } - - // Display capture devices - std::cout << "Enumerated Capture Devices:" << std::endl; - outputEnumeratedDevices(devices, selectedDevice); - std::cout << std::endl; - - // Create capture object from camera - Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]); - std::string uniqueName = devices[selectedDevice].uniqueName(); + // Handle capture lifecycle and start video capture + // Note that loadSettings/saveSettings are not supported by all plugins + if (cap) + { + std::stringstream settingsFilename; + settingsFilename << "camera_settings_" << uniqueName << ".xml"; + calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - // Handle capture lifecycle and start video capture - // Note that loadSettings/saveSettings are not supported by all plugins - if (cap) { - std::stringstream settingsFilename; - settingsFilename << "camera_settings_" << uniqueName << ".xml"; - calibrationFilename << "camera_calibration_" << uniqueName << ".xml"; - - cap->start(); - cap->setResolution(640, 480); - - if (cap->loadSettings(settingsFilename.str())) { - std::cout << "Loading settings: " << settingsFilename.str() << std::endl; - } + cap->start(); + cap->setResolution(640, 480); - std::stringstream title; - title << "SampleTrack (" << cap->captureDevice().captureType() << ")"; + if (cap->loadSettings(settingsFilename.str())) + { + std::cout << "Loading settings: " << settingsFilename.str() + << std::endl; + } - CvTestbed::Instance().StartVideo(cap, title.str().c_str()); + std::stringstream title; + title << "SampleTrack (" << cap->captureDevice().captureType() << ")"; - if (cap->saveSettings(settingsFilename.str())) { - std::cout << "Saving settings: " << settingsFilename.str() << std::endl; - } + CvTestbed::Instance().StartVideo(cap, title.str().c_str()); - cap->stop(); - delete cap; - } - else if (CvTestbed::Instance().StartVideo(0, argv[0])) { - } - else { - std::cout << "Could not initialize the selected capture backend." 
<< std::endl; - } + if (cap->saveSettings(settingsFilename.str())) + { + std::cout << "Saving settings: " << settingsFilename.str() << std::endl; + } - return 0; + cap->stop(); + delete cap; } - catch (const std::exception &e) { - std::cout << "Exception: " << e.what() << endl; + else if (CvTestbed::Instance().StartVideo(0, argv[0])) + { } - catch (...) { - std::cout << "Exception: unknown" << std::endl; + else + { + std::cout << "Could not initialize the selected capture backend." + << std::endl; } + + return 0; + } + catch (const std::exception& e) + { + std::cout << "Exception: " << e.what() << endl; + } + catch (...) + { + std::cout << "Exception: unknown" << std::endl; + } } diff --git a/ar_track_alvar/src/SfM.cpp b/ar_track_alvar/src/SfM.cpp index efd36e3..728304c 100644 --- a/ar_track_alvar/src/SfM.cpp +++ b/ar_track_alvar/src/SfM.cpp @@ -23,485 +23,651 @@ #include "SfM.h" -namespace alvar { - +namespace alvar +{ class CameraMoves { - bool is_initialized; - double prev[3]; + bool is_initialized; + double prev[3]; public: - - CameraMoves() - { - is_initialized = false; - memset(prev, 0, 3*sizeof(double)); - } - - bool UpdateDistance(Pose* pose, double limit=10) - { - double trad[3]; CvMat tra = cvMat(3, 1, CV_64F, trad); - Pose p = *pose; - p.Invert(); - p.GetTranslation(&tra); - if(is_initialized == false) - { - memcpy(prev, trad, 3*sizeof(double)); - is_initialized = true; - return false; - } - double dist = (prev[0]-trad[0])*(prev[0]-trad[0]) + - (prev[1]-trad[1])*(prev[1]-trad[1]) + - (prev[2]-trad[2])*(prev[2]-trad[2]); - if(dist > limit) - { - memcpy(prev, trad, 3*sizeof(double)); - return true; - } - return false; - } + CameraMoves() + { + is_initialized = false; + memset(prev, 0, 3 * sizeof(double)); + } + + bool UpdateDistance(Pose* pose, double limit = 10) + { + double trad[3]; + CvMat tra = cvMat(3, 1, CV_64F, trad); + Pose p = *pose; + p.Invert(); + p.GetTranslation(&tra); + if (is_initialized == false) + { + memcpy(prev, trad, 3 * sizeof(double)); + is_initialized = true; + return false; + } + double dist = (prev[0] - trad[0]) * (prev[0] - trad[0]) + + (prev[1] - trad[1]) * (prev[1] - trad[1]) + + (prev[2] - trad[2]) * (prev[2] - trad[2]); + if (dist > limit) + { + memcpy(prev, trad, 3 * sizeof(double)); + return true; + } + return false; + } }; CameraMoves moving; -void SimpleSfM::Reset(bool reset_also_triangulated) { - pose_ok = false; - container = container_reset_point; - if (reset_also_triangulated) { - container_triangulated = container_triangulated_reset_point; - } - pose_difference.Reset(); - pose_difference.Mirror(false, true, true); +void SimpleSfM::Reset(bool reset_also_triangulated) +{ + pose_ok = false; + container = container_reset_point; + if (reset_also_triangulated) + { + container_triangulated = container_triangulated_reset_point; + } + pose_difference.Reset(); + pose_difference.Mirror(false, true, true); } -void SimpleSfM::SetResetPoint() { - container_reset_point = container; - container_triangulated_reset_point = container_triangulated; +void SimpleSfM::SetResetPoint() +{ + container_reset_point = container; + container_triangulated_reset_point = container_triangulated; } -void SimpleSfM::Clear() { - container.clear(); +void SimpleSfM::Clear() +{ + container.clear(); } -CameraEC *SimpleSfM::GetCamera() { return &cam; } - -Pose *SimpleSfM::GetPose() { return &pose; } +CameraEC* SimpleSfM::GetCamera() +{ + return &cam; +} -bool SimpleSfM::AddMultiMarker(const char *fname, FILE_FORMAT format/* = FILE_FORMAT_XML*/) { - MultiMarkerEC mm; - return 
mm.Load(container, fname, format, 0, 0,1023); +Pose* SimpleSfM::GetPose() +{ + return &pose; } -bool SimpleSfM::AddMultiMarker(MultiMarkerEC *mm) { - return mm->MarkersToEC(container, 0, 0, 1023); +bool SimpleSfM::AddMultiMarker(const char* fname, + FILE_FORMAT format /* = FILE_FORMAT_XML*/) +{ + MultiMarkerEC mm; + return mm.Load(container, fname, format, 0, 0, 1023); } -void SimpleSfM::AddMarker(int marker_id, double edge_length, Pose &pose) { - marker_detector.MarkerToEC(container, marker_id, edge_length, pose, 0, 0, 1023); +bool SimpleSfM::AddMultiMarker(MultiMarkerEC* mm) +{ + return mm->MarkersToEC(container, 0, 0, 1023); } -float PointVectorFromCamera(CvPoint3D32f p3d, CvPoint3D32f &p3d_vec, Pose *camera_pose) { - double pd[16], v[4] = {0,0,0,1}; - CvMat Pi = cvMat(4, 4, CV_64F, pd); - CvMat V = cvMat(4, 1, CV_64F, v); - - // Camera location in marker coordinates - camera_pose->GetMatrix(&Pi); - cvInv(&Pi, &Pi); - cvMatMul(&Pi, &V, &V); - v[0] /= v[3]; - v[1] /= v[3]; - v[2] /= v[3]; - - // Vector from camera-point to the 3D-point in marker coordinates - p3d_vec.x = float(p3d.x - v[0]); - p3d_vec.y = float(p3d.y - v[1]); - p3d_vec.z = float(p3d.z - v[2]); - - return std::sqrt(p3d_vec.x*p3d_vec.x + p3d_vec.y*p3d_vec.y + p3d_vec.z*p3d_vec.z); +void SimpleSfM::AddMarker(int marker_id, double edge_length, Pose& pose) +{ + marker_detector.MarkerToEC(container, marker_id, edge_length, pose, 0, 0, + 1023); } -void CreateShadowPoint(CvPoint3D32f &p3d_sh, CvPoint3D32f p3d, CameraEC *cam, Pose *camera_pose, float parallax_length, float triangulate_angle) { - // Vector from camera-point to the 3D-point in marker coordinates - float l = PointVectorFromCamera(p3d, p3d_sh, camera_pose); - - // Figure out a suitable matching shadow point distance - CvPoint2D32f p2d; - CvPoint3D32f p3d2; - cam->ProjectPoint(p3d, camera_pose, p2d); - p2d.x += parallax_length; - cam->Get3dOnDepth(camera_pose, p2d, l, p3d2); - p3d2.x -= p3d.x; - p3d2.y -= p3d.y; - p3d2.z -= p3d.z; - float pl = std::sqrt(p3d2.x*p3d2.x + p3d2.y*p3d2.y + p3d2.z*p3d2.z); - float shadow_point_dist = float(pl / std::abs(tan(triangulate_angle*3.1415926535/180.))); // How far we need the shadow point to have the parallax limit match - - // Create the shadow point - p3d_sh.x /= l; - p3d_sh.y /= l; - p3d_sh.z /= l; - p3d_sh.x *= shadow_point_dist; - p3d_sh.y *= shadow_point_dist; - p3d_sh.z *= shadow_point_dist; - p3d_sh.x += p3d.x; - p3d_sh.y += p3d.y; - p3d_sh.z += p3d.z; - - //std::cout<<"p : "<GetMatrix(&Pi); + cvInv(&Pi, &Pi); + cvMatMul(&Pi, &V, &V); + v[0] /= v[3]; + v[1] /= v[3]; + v[2] /= v[3]; + + // Vector from camera-point to the 3D-point in marker coordinates + p3d_vec.x = float(p3d.x - v[0]); + p3d_vec.y = float(p3d.y - v[1]); + p3d_vec.z = float(p3d.z - v[2]); + + return std::sqrt(p3d_vec.x * p3d_vec.x + p3d_vec.y * p3d_vec.y + + p3d_vec.z * p3d_vec.z); } -bool SimpleSfM::Update(IplImage *image, bool assume_plane, bool triangulate, float reproj_err_limit, float triangulate_angle) { - const int min_triangulate=50, max_triangulate=150; - const float plane_dist_limit = 100.f; - const bool remember_3d_points = true; - const float parallax_length = 10.f; - const float parallax_length_sq = parallax_length*parallax_length; - float reproj_err_limit_sq = reproj_err_limit*reproj_err_limit; - std::map::iterator iter; - std::map::iterator iter_end = container.end(); - - // #1. 
Detect marker corners and track features - marker_detector.Detect(image, &cam, container, 0, 0, 1023, true, false); - tf.Purge(); - tf.Track(image, 0, container, 1); // Track without adding features - //tf.Track(image, 0, container, 1, 1024, 65535); // Track with adding features - tf.EraseNonTracked(container, 1); - - // #2. Update pose - if (!pose_ok) pose_ok = cam.CalcExteriorOrientation(container, &pose); - pose_ok = cam.UpdatePose(container, &pose); - if (!pose_ok) pose_ok = cam.UpdatePose(container, &pose, 0); // Only markers - if (!pose_ok) return false; - - // #3. Reproject features and their shadows - double err_markers = cam.Reproject(container, &pose, 0, true, false, 100); - if (err_markers > reproj_err_limit*2) { Reset(false); return false;} - double err_features = cam.Reproject(container, &pose, 1, false, false, 100); - if ((err_markers == 0) && (err_features > reproj_err_limit*2)) { - // Error is so large, that we just are satisfied we have some pose - // Don't triangulate and remove points based on this result - return true; - } - - // #4. Add previously triangulated features to container if they are visible... - if (remember_3d_points) { - IplImage *mask = tf.NewFeatureMask(); - iter = container_triangulated.begin(); - while(iter != container_triangulated.end()) { - if (container.find(iter->first) == container.end()) { - Camera *c = &cam; - c->ProjectPoint(iter->second.p3d, &pose, iter->second.projected_p2d); - if ((iter->second.projected_p2d.x >= 0) && - (iter->second.projected_p2d.y >= 0) && - (iter->second.projected_p2d.x < image->width) && - (iter->second.projected_p2d.y < image->height)) - { - CvScalar s = cvGet2D(mask, int(iter->second.projected_p2d.y), int(iter->second.projected_p2d.x)); - if (s.val[0] == 0) { - container[iter->first] = iter->second; - container[iter->first].estimation_type = 3; - container[iter->first].p2d = container[iter->first].projected_p2d; - } - } - } - iter++; - } - } - - // #5. Add other features to container - // Assume them initially on marker plane if (assume_plane == true) - tf.AddFeatures(container, 1, 1024, 0xffffff); //65535); - if (assume_plane) { - for (iter = container.begin(); iter != iter_end; iter++) { - Feature &f = iter->second; - //if (f.type_id == 0) continue; - if (!f.has_p3d && f.estimation_type < 1) { - //cam.Get3dOnPlane(&pose, f.p2d, f.p3d); - - //CvPoint3D32f p3d_vec; - //float l = PointVectorFromCamera(f.p3d, p3d_vec, &pose); - //if (l > plane_dist_limit) cam.Get3dOnDepth(&pose, f.p2d, plane_dist_limit, f.p3d); - cam.Get3dOnDepth(&pose, f.p2d, plane_dist_limit, f.p3d); - - // TODO: Now we don't create a shadow point for plane points. This is because - // we don't want to remember them as 3d points in container_triangulated. - // We probably get the same benefit just by assuming that everything is on plane? - //CreateShadowPoint(f.p3d_sh, f.p3d, &pose, shadow_point_dist); - f.p3d_sh = f.p3d; - - f.has_p3d = true; - f.estimation_type = 1; - } - } - } - - // #6. 
Triangulate features with good parallax - if (triangulate) { - for (iter = container.begin(); iter != iter_end; iter++) { - Feature &f = iter->second; - //if (f.type_id == 0) continue; - if (!f.has_p3d && !f.has_stored_pose) { - f.pose1 = pose; - f.p2d1 = f.p2d; - f.has_stored_pose = true; - cam.Get3dOnDepth(&pose, f.p2d, 8, f.p3d); - CreateShadowPoint(f.p3d_sh, f.p3d, &cam, &pose, parallax_length, triangulate_angle); - } - if (!f.has_p3d && f.has_stored_pose && f.estimation_type < 2) { - double dist_sh_sq = (f.projected_p2d_sh.x-f.projected_p2d.x)*(f.projected_p2d_sh.x-f.projected_p2d.x); - dist_sh_sq += (f.projected_p2d_sh.y-f.projected_p2d.y)*(f.projected_p2d_sh.y-f.projected_p2d.y); - if (dist_sh_sq > parallax_length_sq) { - CvPoint2D32f u1 = f.p2d1; - CvPoint2D32f u2 = f.p2d; - cam.Camera::Undistort(u1); - cam.Camera::Undistort(u2); - if(cam.ReconstructFeature(&f.pose1, &pose, &u1, &u2, &f.p3d, 10e-6)) { - CreateShadowPoint(f.p3d_sh, f.p3d, &cam, &pose, parallax_length, triangulate_angle); - f.has_p3d = true; - } else - f.has_stored_pose = false; - f.estimation_type = 2; - } - } - } - } - - // #7. Erase poor features - cam.Reproject(container, &pose, 1, false, false, 100); // TODO: Why again... - iter = container.begin(); - while(iter != container.end()) { - bool iter_inc = true; - if (iter->second.type_id > 0) { - Feature &f = iter->second; - if ((f.estimation_type == 0) || !f.has_p2d || !f.has_p3d); - else { - // Note that the projected_p2d needs to have valid content at this point - double dist_sq = (f.p2d.x-f.projected_p2d.x)*(f.p2d.x-f.projected_p2d.x); - dist_sq += (f.p2d.y-f.projected_p2d.y)*(f.p2d.y-f.projected_p2d.y); - - if (f.estimation_type == 1) { - if (dist_sq > (reproj_err_limit_sq)) f.has_p3d = false; - } else if (f.estimation_type == 2) { - if (dist_sq > (reproj_err_limit_sq)) { - container.erase(iter++); - iter_inc = false; - } - } else if (f.estimation_type == 3) { - if (container_triangulated.size() < min_triangulate) { - if (dist_sq > (reproj_err_limit_sq)) { - container.erase(iter++); - iter_inc = false; - } - } else { - if (dist_sq > (reproj_err_limit_sq)) f.has_p3d = false; - if (dist_sq > (reproj_err_limit_sq*2)) { - std::map::iterator iter_tmp; - iter_tmp = container_triangulated.find(iter->first); - if (iter_tmp != container_triangulated.end()) { - container_triangulated.erase(iter_tmp); - } - container.erase(iter++); - iter_inc = false; - } - } - } - } - } - if (iter_inc) iter++; - } - - // #8. 
Remember good features that have little reprojection error while the parallax is noticeable - if (remember_3d_points && (container_triangulated.size() < max_triangulate)) { - iter = container.begin(); - while(iter != container.end()) { - if ((iter->second.type_id > 0) && - (container_triangulated.find(iter->first) == container_triangulated.end())) - { - Feature &f = iter->second; - double dist_sq = (f.p2d.x-f.projected_p2d.x)*(f.p2d.x-f.projected_p2d.x); - dist_sq += (f.p2d.y-f.projected_p2d.y)*(f.p2d.y-f.projected_p2d.y); - double dist_sh_sq = (f.projected_p2d_sh.x-f.projected_p2d.x)*(f.projected_p2d_sh.x-f.projected_p2d.x); - dist_sh_sq += (f.projected_p2d_sh.y-f.projected_p2d.y)*(f.projected_p2d_sh.y-f.projected_p2d.y); - if ((dist_sq < reproj_err_limit_sq) && (dist_sh_sq > parallax_length_sq)) { - container_triangulated[iter->first] = iter->second; - f.estimation_type = 3; - } - } - iter++; - if (container_triangulated.size() >= max_triangulate) break; - } - } - - return true; +void CreateShadowPoint(CvPoint3D32f& p3d_sh, CvPoint3D32f p3d, CameraEC* cam, + Pose* camera_pose, float parallax_length, + float triangulate_angle) +{ + // Vector from camera-point to the 3D-point in marker coordinates + float l = PointVectorFromCamera(p3d, p3d_sh, camera_pose); + + // Figure out a suitable matching shadow point distance + CvPoint2D32f p2d; + CvPoint3D32f p3d2; + cam->ProjectPoint(p3d, camera_pose, p2d); + p2d.x += parallax_length; + cam->Get3dOnDepth(camera_pose, p2d, l, p3d2); + p3d2.x -= p3d.x; + p3d2.y -= p3d.y; + p3d2.z -= p3d.z; + float pl = std::sqrt(p3d2.x * p3d2.x + p3d2.y * p3d2.y + p3d2.z * p3d2.z); + float shadow_point_dist = + float(pl / std::abs(tan(triangulate_angle * 3.1415926535 / + 180.))); // How far we need the shadow point to + // have the parallax limit match + + // Create the shadow point + p3d_sh.x /= l; + p3d_sh.y /= l; + p3d_sh.z /= l; + p3d_sh.x *= shadow_point_dist; + p3d_sh.y *= shadow_point_dist; + p3d_sh.z *= shadow_point_dist; + p3d_sh.x += p3d.x; + p3d_sh.y += p3d.y; + p3d_sh.z += p3d.z; + + // std::cout<<"p : "<::iterator iter = container.begin(); - std::map::iterator iter_end = container.end(); - for(;iter != iter_end; iter++) { - if (pose_ok && (iter->second.type_id == 1) && (!iter->second.has_stored_pose) && (!iter->second.has_p3d)) { - iter->second.pose1 = pose; - iter->second.p2d1 = iter->second.p2d; - iter->second.has_stored_pose = true; - } - } - iter = container.begin(); - for(;iter != iter_end; iter++) - { - Feature &f = iter->second; - if(update_tri && f.has_stored_pose && !f.has_p3d) { - f.tri_cntr++; - } - if(f.tri_cntr==3) { - CvPoint2D32f u1 = f.p2d1; - CvPoint2D32f u2 = f.p2d; - cam.Camera::Undistort(u1); - cam.Camera::Undistort(u2); - if(cam.ReconstructFeature(&f.pose1, &pose, &u1, &u2, &f.p3d, 10e-6)) - f.has_p3d = true; - else - f.has_stored_pose = false; - f.tri_cntr = 0; - } - } - if (pose_ok) { - double err_markers = cam.Reproject(container, &pose, 0, true, false, 100); - if(err_markers > 30) { Reset(false); return false;} - double err_features = cam.Reproject(container, &pose, 1, false, false, 100); - cam.EraseUsingReprojectionError(container, 30, 1); - } - return pose_ok; +bool SimpleSfM::Update(IplImage* image, bool assume_plane, bool triangulate, + float reproj_err_limit, float triangulate_angle) +{ + const int min_triangulate = 50, max_triangulate = 150; + const float plane_dist_limit = 100.f; + const bool remember_3d_points = true; + const float parallax_length = 10.f; + const float parallax_length_sq = parallax_length * 
parallax_length; + float reproj_err_limit_sq = reproj_err_limit * reproj_err_limit; + std::map::iterator iter; + std::map::iterator iter_end = container.end(); + + // #1. Detect marker corners and track features + marker_detector.Detect(image, &cam, container, 0, 0, 1023, true, false); + tf.Purge(); + tf.Track(image, 0, container, 1); // Track without adding features + // tf.Track(image, 0, container, 1, 1024, 65535); // Track with adding + // features + tf.EraseNonTracked(container, 1); + + // #2. Update pose + if (!pose_ok) + pose_ok = cam.CalcExteriorOrientation(container, &pose); + pose_ok = cam.UpdatePose(container, &pose); + if (!pose_ok) + pose_ok = cam.UpdatePose(container, &pose, 0); // Only markers + if (!pose_ok) + return false; + + // #3. Reproject features and their shadows + double err_markers = cam.Reproject(container, &pose, 0, true, false, 100); + if (err_markers > reproj_err_limit * 2) + { + Reset(false); + return false; + } + double err_features = cam.Reproject(container, &pose, 1, false, false, 100); + if ((err_markers == 0) && (err_features > reproj_err_limit * 2)) + { + // Error is so large, that we just are satisfied we have some pose + // Don't triangulate and remove points based on this result + return true; + } + + // #4. Add previously triangulated features to container if they are + // visible... + if (remember_3d_points) + { + IplImage* mask = tf.NewFeatureMask(); + iter = container_triangulated.begin(); + while (iter != container_triangulated.end()) + { + if (container.find(iter->first) == container.end()) + { + Camera* c = &cam; + c->ProjectPoint(iter->second.p3d, &pose, iter->second.projected_p2d); + if ((iter->second.projected_p2d.x >= 0) && + (iter->second.projected_p2d.y >= 0) && + (iter->second.projected_p2d.x < image->width) && + (iter->second.projected_p2d.y < image->height)) + { + CvScalar s = cvGet2D(mask, int(iter->second.projected_p2d.y), + int(iter->second.projected_p2d.x)); + if (s.val[0] == 0) + { + container[iter->first] = iter->second; + container[iter->first].estimation_type = 3; + container[iter->first].p2d = container[iter->first].projected_p2d; + } + } + } + iter++; + } + } + + // #5. Add other features to container + // Assume them initially on marker plane if (assume_plane == true) + tf.AddFeatures(container, 1, 1024, 0xffffff); // 65535); + if (assume_plane) + { + for (iter = container.begin(); iter != iter_end; iter++) + { + Feature& f = iter->second; + // if (f.type_id == 0) continue; + if (!f.has_p3d && f.estimation_type < 1) + { + // cam.Get3dOnPlane(&pose, f.p2d, f.p3d); + + // CvPoint3D32f p3d_vec; + // float l = PointVectorFromCamera(f.p3d, p3d_vec, &pose); + // if (l > plane_dist_limit) cam.Get3dOnDepth(&pose, f.p2d, + // plane_dist_limit, f.p3d); + cam.Get3dOnDepth(&pose, f.p2d, plane_dist_limit, f.p3d); + + // TODO: Now we don't create a shadow point for plane points. This is + // because + // we don't want to remember them as 3d points in + // container_triangulated. We probably get the same benefit just + // by assuming that everything is on plane? + // CreateShadowPoint(f.p3d_sh, f.p3d, &pose, shadow_point_dist); + f.p3d_sh = f.p3d; + + f.has_p3d = true; + f.estimation_type = 1; + } + } + } + + // #6. 
Triangulate features with good parallax + if (triangulate) + { + for (iter = container.begin(); iter != iter_end; iter++) + { + Feature& f = iter->second; + // if (f.type_id == 0) continue; + if (!f.has_p3d && !f.has_stored_pose) + { + f.pose1 = pose; + f.p2d1 = f.p2d; + f.has_stored_pose = true; + cam.Get3dOnDepth(&pose, f.p2d, 8, f.p3d); + CreateShadowPoint(f.p3d_sh, f.p3d, &cam, &pose, parallax_length, + triangulate_angle); + } + if (!f.has_p3d && f.has_stored_pose && f.estimation_type < 2) + { + double dist_sh_sq = (f.projected_p2d_sh.x - f.projected_p2d.x) * + (f.projected_p2d_sh.x - f.projected_p2d.x); + dist_sh_sq += (f.projected_p2d_sh.y - f.projected_p2d.y) * + (f.projected_p2d_sh.y - f.projected_p2d.y); + if (dist_sh_sq > parallax_length_sq) + { + CvPoint2D32f u1 = f.p2d1; + CvPoint2D32f u2 = f.p2d; + cam.Camera::Undistort(u1); + cam.Camera::Undistort(u2); + if (cam.ReconstructFeature(&f.pose1, &pose, &u1, &u2, &f.p3d, 10e-6)) + { + CreateShadowPoint(f.p3d_sh, f.p3d, &cam, &pose, parallax_length, + triangulate_angle); + f.has_p3d = true; + } + else + f.has_stored_pose = false; + f.estimation_type = 2; + } + } + } + } + + // #7. Erase poor features + cam.Reproject(container, &pose, 1, false, false, 100); // TODO: Why again... + iter = container.begin(); + while (iter != container.end()) + { + bool iter_inc = true; + if (iter->second.type_id > 0) + { + Feature& f = iter->second; + if ((f.estimation_type == 0) || !f.has_p2d || !f.has_p3d) + ; + else + { + // Note that the projected_p2d needs to have valid content at this point + double dist_sq = + (f.p2d.x - f.projected_p2d.x) * (f.p2d.x - f.projected_p2d.x); + dist_sq += + (f.p2d.y - f.projected_p2d.y) * (f.p2d.y - f.projected_p2d.y); + + if (f.estimation_type == 1) + { + if (dist_sq > (reproj_err_limit_sq)) + f.has_p3d = false; + } + else if (f.estimation_type == 2) + { + if (dist_sq > (reproj_err_limit_sq)) + { + container.erase(iter++); + iter_inc = false; + } + } + else if (f.estimation_type == 3) + { + if (container_triangulated.size() < min_triangulate) + { + if (dist_sq > (reproj_err_limit_sq)) + { + container.erase(iter++); + iter_inc = false; + } + } + else + { + if (dist_sq > (reproj_err_limit_sq)) + f.has_p3d = false; + if (dist_sq > (reproj_err_limit_sq * 2)) + { + std::map::iterator iter_tmp; + iter_tmp = container_triangulated.find(iter->first); + if (iter_tmp != container_triangulated.end()) + { + container_triangulated.erase(iter_tmp); + } + container.erase(iter++); + iter_inc = false; + } + } + } + } + } + if (iter_inc) + iter++; + } + + // #8. 
Remember good features that have little reprojection error while the + // parallax is noticeable + if (remember_3d_points && (container_triangulated.size() < max_triangulate)) + { + iter = container.begin(); + while (iter != container.end()) + { + if ((iter->second.type_id > 0) && + (container_triangulated.find(iter->first) == + container_triangulated.end())) + { + Feature& f = iter->second; + double dist_sq = + (f.p2d.x - f.projected_p2d.x) * (f.p2d.x - f.projected_p2d.x); + dist_sq += + (f.p2d.y - f.projected_p2d.y) * (f.p2d.y - f.projected_p2d.y); + double dist_sh_sq = (f.projected_p2d_sh.x - f.projected_p2d.x) * + (f.projected_p2d_sh.x - f.projected_p2d.x); + dist_sh_sq += (f.projected_p2d_sh.y - f.projected_p2d.y) * + (f.projected_p2d_sh.y - f.projected_p2d.y); + if ((dist_sq < reproj_err_limit_sq) && + (dist_sh_sq > parallax_length_sq)) + { + container_triangulated[iter->first] = iter->second; + f.estimation_type = 3; + } + } + iter++; + if (container_triangulated.size() >= max_triangulate) + break; + } + } + + return true; } -bool SimpleSfM::UpdateRotationsOnly(IplImage *image) { - int n_markers = marker_detector.Detect(image, &cam, container, 0, 0, 1023, true, false); - static int n_markers_prev = 0; - tf.Purge(); - tf.Track(image, 0, container, 1, 1024, 65535); - tf.EraseNonTracked(container, 1); - int type_id = -1; - if(n_markers>=1) type_id = 0; - if (n_markers==1) - { - pose_ok = cam.CalcExteriorOrientation(container, &pose, 0); - if(pose_ok) pose_original = pose; - } - else if(n_markers>1) - { - if(pose_ok) - pose_ok = cam.UpdatePose(container, &pose, 0); - else - pose_ok = cam.CalcExteriorOrientation(container, &pose, 0); - if(pose_ok) pose_original = pose; - } - else //if(pose_ok) // Tracking going on... - { - if (n_markers_prev > 0) { - pose_difference.Reset(); - pose_difference.Mirror(false, true, true); - std::map::iterator iter = container.begin(); - std::map::iterator iter_end = container.end(); - for(;iter != iter_end; iter++) { - if ((iter->second.type_id == 1) && (iter->second.has_p2d)) { - CvPoint2D32f _p2d = iter->second.p2d; - cam.Get3dOnDepth(&pose_difference, _p2d, 1, iter->second.p3d); - iter->second.has_p3d = true; - } - } - } - cam.UpdateRotation(container, &pose_difference, 1); - if(pose_ok) - { - double rot_mat[16]; - double gl_mat[16]; - pose_difference.GetMatrixGL(rot_mat); - pose_original.GetMatrixGL(gl_mat); - CvMat rot = cvMat(4, 4, CV_64F, rot_mat);// Rotation matrix (difference) from the tracker - CvMat mod = cvMat(4, 4, CV_64F, gl_mat); // Original modelview matrix (camera location) - cvMatMul(&mod, &rot, &rot); - /*if(pose_ok)*/ pose.SetMatrixGL(rot_mat); - } - //pose_ok = true; - } - n_markers_prev = n_markers; - if (pose_ok) { - if (n_markers < 1) { - std::map::iterator iter = container.begin(); - std::map::iterator iter_end = container.end(); - for(;iter != iter_end; iter++) { - if ((iter->second.type_id == 1) && (!iter->second.has_p3d) && (iter->second.has_p2d)) { - CvPoint2D32f _p2d = iter->second.p2d; - cam.Get3dOnDepth(&pose_difference, _p2d, 1, iter->second.p3d); - iter->second.has_p3d = true; - } - } - } - - double err_markers = cam.Reproject(container, &pose, 0, true, false, 100); - if(err_markers > 10) { Reset(false); return false;} - double err_features = cam.Reproject(container, &pose_difference, 1, false, false, 100); - cam.EraseUsingReprojectionError(container, 10, 1); - } - return pose_ok; +bool SimpleSfM::UpdateTriangulateOnly(IplImage* image) +{ + marker_detector.Detect(image, &cam, container, 0, 0, 1023, true, false); + 
tf.Purge(); + tf.Track(image, 0, container, 1, 1024, 65535); + tf.EraseNonTracked(container, 1); + if (!pose_ok) + pose_ok = cam.CalcExteriorOrientation(container, &pose); + else + pose_ok = cam.UpdatePose(container, &pose); + if (pose_ok) + update_tri = moving.UpdateDistance(&pose, scale); + std::map::iterator iter = container.begin(); + std::map::iterator iter_end = container.end(); + for (; iter != iter_end; iter++) + { + if (pose_ok && (iter->second.type_id == 1) && + (!iter->second.has_stored_pose) && (!iter->second.has_p3d)) + { + iter->second.pose1 = pose; + iter->second.p2d1 = iter->second.p2d; + iter->second.has_stored_pose = true; + } + } + iter = container.begin(); + for (; iter != iter_end; iter++) + { + Feature& f = iter->second; + if (update_tri && f.has_stored_pose && !f.has_p3d) + { + f.tri_cntr++; + } + if (f.tri_cntr == 3) + { + CvPoint2D32f u1 = f.p2d1; + CvPoint2D32f u2 = f.p2d; + cam.Camera::Undistort(u1); + cam.Camera::Undistort(u2); + if (cam.ReconstructFeature(&f.pose1, &pose, &u1, &u2, &f.p3d, 10e-6)) + f.has_p3d = true; + else + f.has_stored_pose = false; + f.tri_cntr = 0; + } + } + if (pose_ok) + { + double err_markers = cam.Reproject(container, &pose, 0, true, false, 100); + if (err_markers > 30) + { + Reset(false); + return false; + } + double err_features = cam.Reproject(container, &pose, 1, false, false, 100); + cam.EraseUsingReprojectionError(container, 30, 1); + } + return pose_ok; } -void SimpleSfM::Draw(IplImage *rgba) { - if(markers_found) return; - std::map::iterator iter = container.begin(); - std::map::iterator iter_end = container.end(); - for(;iter != iter_end;iter++) { - Feature &f = iter->second; - if (f.has_p2d) { - int r=0, g=0, b=0, rad=1; - if (f.type_id == 0) {r=255; g=0; b=0;} - else if (f.estimation_type == 0) { - if (f.has_stored_pose) {r=255; g=0; b=255;} - else {r=0; g=255; b=255;} - } - else if (f.estimation_type == 1) {r=0; g=255; b=0;} - else if (f.estimation_type == 2) {r=255; g=0; b=255;} - else if (f.estimation_type == 3) {r=0; g=0; b=255;} - if (f.has_p3d) rad=5; - else rad = f.tri_cntr+1; - - cvCircle(rgba, cvPointFrom32f(f.p2d), rad, CV_RGB(r,g,b)); - if (pose_ok) { - // The shadow point line - if (f.type_id > 0 && f.estimation_type < 3 && f.p3d_sh.x != 0.f) { - cvLine(rgba, cvPointFrom32f(f.projected_p2d), cvPointFrom32f(f.projected_p2d_sh), CV_RGB(0,255,0)); - } - // Reprojection error - if (f.has_p3d) { - cvLine(rgba, cvPointFrom32f(f.p2d), cvPointFrom32f(f.projected_p2d), CV_RGB(255,0,255)); - } - } - //if (pose_ok && f.has_p3d) { - // cvLine(rgba, cvPointFrom32f(f.p2d), cvPointFrom32f(f.projected_p2d), CV_RGB(255,0,255)); - //} - //if (f.type_id == 0) { - // int id = iter->first; - // cvCircle(rgba, cvPointFrom32f(f.p2d), 7, CV_RGB(255,0,0)); - //} else { - // int id = iter->first-1024+1; - // if (f.has_p3d) { - // cvCircle(rgba, cvPointFrom32f(f.p2d), 5, CV_RGB(0,255,0)); - // } - // else if (f.has_stored_pose) - // cvCircle(rgba, cvPointFrom32f(f.p2d), f.tri_cntr+1, CV_RGB(255,0,255)); - // else - // cvCircle(rgba, cvPointFrom32f(f.p2d), 5, CV_RGB(0,255,255)); - //} - } - } +bool SimpleSfM::UpdateRotationsOnly(IplImage* image) +{ + int n_markers = + marker_detector.Detect(image, &cam, container, 0, 0, 1023, true, false); + static int n_markers_prev = 0; + tf.Purge(); + tf.Track(image, 0, container, 1, 1024, 65535); + tf.EraseNonTracked(container, 1); + int type_id = -1; + if (n_markers >= 1) + type_id = 0; + if (n_markers == 1) + { + pose_ok = cam.CalcExteriorOrientation(container, &pose, 0); + if (pose_ok) + 
pose_original = pose; + } + else if (n_markers > 1) + { + if (pose_ok) + pose_ok = cam.UpdatePose(container, &pose, 0); + else + pose_ok = cam.CalcExteriorOrientation(container, &pose, 0); + if (pose_ok) + pose_original = pose; + } + else // if(pose_ok) // Tracking going on... + { + if (n_markers_prev > 0) + { + pose_difference.Reset(); + pose_difference.Mirror(false, true, true); + std::map::iterator iter = container.begin(); + std::map::iterator iter_end = container.end(); + for (; iter != iter_end; iter++) + { + if ((iter->second.type_id == 1) && (iter->second.has_p2d)) + { + CvPoint2D32f _p2d = iter->second.p2d; + cam.Get3dOnDepth(&pose_difference, _p2d, 1, iter->second.p3d); + iter->second.has_p3d = true; + } + } + } + cam.UpdateRotation(container, &pose_difference, 1); + if (pose_ok) + { + double rot_mat[16]; + double gl_mat[16]; + pose_difference.GetMatrixGL(rot_mat); + pose_original.GetMatrixGL(gl_mat); + CvMat rot = cvMat(4, 4, CV_64F, rot_mat); // Rotation matrix (difference) + // from the tracker + CvMat mod = cvMat(4, 4, CV_64F, + gl_mat); // Original modelview matrix (camera location) + cvMatMul(&mod, &rot, &rot); + /*if(pose_ok)*/ pose.SetMatrixGL(rot_mat); + } + // pose_ok = true; + } + n_markers_prev = n_markers; + if (pose_ok) + { + if (n_markers < 1) + { + std::map::iterator iter = container.begin(); + std::map::iterator iter_end = container.end(); + for (; iter != iter_end; iter++) + { + if ((iter->second.type_id == 1) && (!iter->second.has_p3d) && + (iter->second.has_p2d)) + { + CvPoint2D32f _p2d = iter->second.p2d; + cam.Get3dOnDepth(&pose_difference, _p2d, 1, iter->second.p3d); + iter->second.has_p3d = true; + } + } + } + + double err_markers = cam.Reproject(container, &pose, 0, true, false, 100); + if (err_markers > 10) + { + Reset(false); + return false; + } + double err_features = + cam.Reproject(container, &pose_difference, 1, false, false, 100); + cam.EraseUsingReprojectionError(container, 10, 1); + } + return pose_ok; } -} // namespace alvar +void SimpleSfM::Draw(IplImage* rgba) +{ + if (markers_found) + return; + std::map::iterator iter = container.begin(); + std::map::iterator iter_end = container.end(); + for (; iter != iter_end; iter++) + { + Feature& f = iter->second; + if (f.has_p2d) + { + int r = 0, g = 0, b = 0, rad = 1; + if (f.type_id == 0) + { + r = 255; + g = 0; + b = 0; + } + else if (f.estimation_type == 0) + { + if (f.has_stored_pose) + { + r = 255; + g = 0; + b = 255; + } + else + { + r = 0; + g = 255; + b = 255; + } + } + else if (f.estimation_type == 1) + { + r = 0; + g = 255; + b = 0; + } + else if (f.estimation_type == 2) + { + r = 255; + g = 0; + b = 255; + } + else if (f.estimation_type == 3) + { + r = 0; + g = 0; + b = 255; + } + if (f.has_p3d) + rad = 5; + else + rad = f.tri_cntr + 1; + + cvCircle(rgba, cvPointFrom32f(f.p2d), rad, CV_RGB(r, g, b)); + if (pose_ok) + { + // The shadow point line + if (f.type_id > 0 && f.estimation_type < 3 && f.p3d_sh.x != 0.f) + { + cvLine(rgba, cvPointFrom32f(f.projected_p2d), + cvPointFrom32f(f.projected_p2d_sh), CV_RGB(0, 255, 0)); + } + // Reprojection error + if (f.has_p3d) + { + cvLine(rgba, cvPointFrom32f(f.p2d), cvPointFrom32f(f.projected_p2d), + CV_RGB(255, 0, 255)); + } + } + // if (pose_ok && f.has_p3d) { + // cvLine(rgba, cvPointFrom32f(f.p2d), cvPointFrom32f(f.projected_p2d), + // CV_RGB(255,0,255)); + //} + // if (f.type_id == 0) { + // int id = iter->first; + // cvCircle(rgba, cvPointFrom32f(f.p2d), 7, CV_RGB(255,0,0)); + //} else { + // int id = iter->first-1024+1; + // if (f.has_p3d) { + 
// cvCircle(rgba, cvPointFrom32f(f.p2d), 5, CV_RGB(0,255,0));
+ // }
+ // else if (f.has_stored_pose)
+ // cvCircle(rgba, cvPointFrom32f(f.p2d), f.tri_cntr+1,
+ // CV_RGB(255,0,255)); else cvCircle(rgba, cvPointFrom32f(f.p2d), 5,
+ // CV_RGB(0,255,255));
+ //}
+ }
+ }
+}
+} // namespace alvar
diff --git a/ar_track_alvar/src/Threads.cpp b/ar_track_alvar/src/Threads.cpp
index fc4ef7b..114bbc3 100644
--- a/ar_track_alvar/src/Threads.cpp
+++ b/ar_track_alvar/src/Threads.cpp
@@ -25,21 +25,20 @@
#include "ar_track_alvar/Threads_private.h"
-namespace alvar {
-
-Threads::Threads()
- : d(new ThreadsPrivate())
+namespace alvar
+{
+Threads::Threads() : d(new ThreadsPrivate())
{
}
Threads::~Threads()
{
- delete d;
+ delete d;
}
-bool Threads::create(void *(*method)(void *), void *parameters)
+bool Threads::create(void* (*method)(void*), void* parameters)
{
- return d->create(method, parameters);
+ return d->create(method, parameters);
}
-} // namespace alvar
+} // namespace alvar
diff --git a/ar_track_alvar/src/Threads_unix.cpp b/ar_track_alvar/src/Threads_unix.cpp
index 2e91bd7..8500a27 100644
--- a/ar_track_alvar/src/Threads_unix.cpp
+++ b/ar_track_alvar/src/Threads_unix.cpp
@@ -26,42 +26,42 @@
#include <pthread.h>
#include <vector>
-namespace alvar {
-
+namespace alvar
+{
class ThreadsPrivateData
{
public:
- ThreadsPrivateData()
- : mHandles()
- {
- }
+ ThreadsPrivateData() : mHandles()
+ {
+ }
- std::vector<pthread_t> mHandles;
+ std::vector<pthread_t> mHandles;
};
-ThreadsPrivate::ThreadsPrivate()
- : d(new ThreadsPrivateData())
+ThreadsPrivate::ThreadsPrivate() : d(new ThreadsPrivateData())
{
}
ThreadsPrivate::~ThreadsPrivate()
{
- for (int i = 0; i < (int)d->mHandles.size(); ++i) {
- pthread_exit(&d->mHandles.at(i));
- }
- d->mHandles.clear();
+ for (int i = 0; i < (int)d->mHandles.size(); ++i)
+ {
+ pthread_exit(&d->mHandles.at(i));
+ }
+ d->mHandles.clear();
- delete d;
+ delete d;
}
-bool ThreadsPrivate::create(void *(*method)(void *), void *parameters)
+bool ThreadsPrivate::create(void* (*method)(void*), void* parameters)
{
- pthread_t thread;
- if (pthread_create(&thread, 0, method, parameters)) {
- d->mHandles.push_back(thread);
- return true;
- }
- return false;
+ pthread_t thread;
+ // pthread_create() returns 0 on success, so only record the handle then.
+ if (pthread_create(&thread, 0, method, parameters) == 0)
+ {
+ d->mHandles.push_back(thread);
+ return true;
+ }
+ return false;
}
-} // namespace alvar
+} // namespace alvar
diff --git a/ar_track_alvar/src/Threads_win.cpp b/ar_track_alvar/src/Threads_win.cpp
index 3ed7355..8ca8200 100644
--- a/ar_track_alvar/src/Threads_win.cpp
+++ b/ar_track_alvar/src/Threads_win.cpp
@@ -26,68 +26,63 @@
#include <windows.h>
#include <vector>
-namespace alvar {
-
+namespace alvar
+{
struct StartThreadParameters
{
- void *(*method)(void *);
- void *parameters;
+ void* (*method)(void*);
+ void* parameters;
};
-static DWORD WINAPI startThread(void *parameters)
+static DWORD WINAPI startThread(void* parameters)
{
- DWORD value;
- struct StartThreadParameters *p = (struct StartThreadParameters *)parameters;
- value = (DWORD)p->method(p->parameters);
- delete p;
- return value;
+ DWORD value;
+ struct StartThreadParameters* p = (struct StartThreadParameters*)parameters;
+ value = (DWORD)p->method(p->parameters);
+ delete p;
+ return value;
}
class ThreadsPrivateData
{
public:
- ThreadsPrivateData()
- : mHandles()
- {
- }
+ ThreadsPrivateData() : mHandles()
+ {
+ }
- std::vector<HANDLE> mHandles;
+ std::vector<HANDLE> mHandles;
};
-ThreadsPrivate::ThreadsPrivate()
- : d(new ThreadsPrivateData())
+ThreadsPrivate::ThreadsPrivate() : d(new ThreadsPrivateData())
{
}
ThreadsPrivate::~ThreadsPrivate()
{
- for (int i =
0; i < (int)d->mHandles.size(); ++i) {
- CloseHandle(d->mHandles.at(i));
- }
- d->mHandles.clear();
+ for (int i = 0; i < (int)d->mHandles.size(); ++i)
+ {
+ CloseHandle(d->mHandles.at(i));
+ }
+ d->mHandles.clear();
- delete d;
+ delete d;
}
-bool ThreadsPrivate::create(void *(*method)(void *), void *parameters)
+bool ThreadsPrivate::create(void* (*method)(void*), void* parameters)
{
- StartThreadParameters *p = new StartThreadParameters;
- p->method = method;
- p->parameters = parameters;
+ StartThreadParameters* p = new StartThreadParameters;
+ p->method = method;
+ p->parameters = parameters;
- HANDLE thread = CreateThread(
- NULL,
- 0,
- (LPTHREAD_START_ROUTINE)startThread,
- p,
- 0,
- NULL);
+ HANDLE thread =
+ CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)startThread, p, 0, NULL);
- if (thread != NULL) {
- d->mHandles.push_back(thread);
- return true;
- }
- return false;
+ if (thread != NULL)
+ {
+ d->mHandles.push_back(thread);
+ return true;
+ }
+ return false;
}
-} // namespace alvar
+} // namespace alvar
diff --git a/ar_track_alvar/src/Timer.cpp b/ar_track_alvar/src/Timer.cpp
index ee40e2d..379a199 100644
--- a/ar_track_alvar/src/Timer.cpp
+++ b/ar_track_alvar/src/Timer.cpp
@@ -25,26 +25,25 @@
#include "Timer_private.h"
-namespace alvar {
-
-Timer::Timer()
- : d(new TimerPrivate())
+namespace alvar
+{
+Timer::Timer() : d(new TimerPrivate())
{
}
Timer::~Timer()
{
- delete d;
+ delete d;
}
void Timer::start()
{
- return d->start();
+ return d->start();
}
double Timer::stop()
{
- return d->stop();
+ return d->stop();
}
-} // namespace alvar
+} // namespace alvar
diff --git a/ar_track_alvar/src/Timer_unix.cpp b/ar_track_alvar/src/Timer_unix.cpp
index a34071b..5dd005e 100644
--- a/ar_track_alvar/src/Timer_unix.cpp
+++ b/ar_track_alvar/src/Timer_unix.cpp
@@ -25,40 +25,38 @@
#include <time.h>
-namespace alvar {
-
+namespace alvar
+{
class TimerPrivateData
{
public:
- TimerPrivateData()
- : mStart()
- {
- }
+ TimerPrivateData() : mStart()
+ {
+ }
- timespec mStart;
+ timespec mStart;
};
-TimerPrivate::TimerPrivate()
- : d(new TimerPrivateData())
+TimerPrivate::TimerPrivate() : d(new TimerPrivateData())
{
}
TimerPrivate::~TimerPrivate()
{
- delete d;
+ delete d;
}
void TimerPrivate::start()
{
- clock_gettime(CLOCK_MONOTONIC, &d->mStart);
+ clock_gettime(CLOCK_MONOTONIC, &d->mStart);
}
double TimerPrivate::stop()
{
- timespec stop;
- clock_gettime(CLOCK_MONOTONIC, &stop);
- return (stop.tv_sec - d->mStart.tv_sec) +
- (stop.tv_nsec - d->mStart.tv_nsec) / 1000000000.0;
+ timespec stop;
+ clock_gettime(CLOCK_MONOTONIC, &stop);
+ return (stop.tv_sec - d->mStart.tv_sec) +
+ (stop.tv_nsec - d->mStart.tv_nsec) / 1000000000.0;
}
-} // namespace alvar
+} // namespace alvar
diff --git a/ar_track_alvar/src/Timer_win.cpp b/ar_track_alvar/src/Timer_win.cpp
index 3d5cb0a..eaefe90 100644
--- a/ar_track_alvar/src/Timer_win.cpp
+++ b/ar_track_alvar/src/Timer_win.cpp
@@ -25,61 +25,65 @@
#include <windows.h>
-namespace alvar {
-
+namespace alvar
+{
class TimerPrivateData
{
public:
- TimerPrivateData()
- : mPerformanceQuerySupported(false)
- , mPerformanceFrequency()
- , mPerformanceStart()
- , mStart()
- {
- }
+ TimerPrivateData()
+ : mPerformanceQuerySupported(false)
+ , mPerformanceFrequency()
+ , mPerformanceStart()
+ , mStart()
+ {
+ }
- bool mPerformanceQuerySupported;
- LARGE_INTEGER mPerformanceFrequency;
- LARGE_INTEGER mPerformanceStart;
- DWORD mStart;
+ bool mPerformanceQuerySupported;
+ LARGE_INTEGER mPerformanceFrequency;
+ LARGE_INTEGER mPerformanceStart;
+ DWORD mStart;
};
-TimerPrivate::TimerPrivate()
- : d(new
TimerPrivateData()) +TimerPrivate::TimerPrivate() : d(new TimerPrivateData()) { - QueryPerformanceFrequency(&d->mPerformanceFrequency); - if (d->mPerformanceFrequency.QuadPart) { - d->mPerformanceQuerySupported = true; - } + QueryPerformanceFrequency(&d->mPerformanceFrequency); + if (d->mPerformanceFrequency.QuadPart) + { + d->mPerformanceQuerySupported = true; + } } TimerPrivate::~TimerPrivate() { - delete d; + delete d; } void TimerPrivate::start() { - if (d->mPerformanceQuerySupported) { - QueryPerformanceCounter(&d->mPerformanceStart); - } - else { - d->mStart = GetTickCount(); - } + if (d->mPerformanceQuerySupported) + { + QueryPerformanceCounter(&d->mPerformanceStart); + } + else + { + d->mStart = GetTickCount(); + } } double TimerPrivate::stop() { - if (d->mPerformanceQuerySupported) { - LARGE_INTEGER stop; - LARGE_INTEGER difference; - QueryPerformanceCounter(&stop); - difference.QuadPart = stop.QuadPart - d->mPerformanceStart.QuadPart; - return double(difference.QuadPart) / d->mPerformanceFrequency.QuadPart; - } - else { - return (GetTickCount() - d->mStart) / 1000.0; - } + if (d->mPerformanceQuerySupported) + { + LARGE_INTEGER stop; + LARGE_INTEGER difference; + QueryPerformanceCounter(&stop); + difference.QuadPart = stop.QuadPart - d->mPerformanceStart.QuadPart; + return double(difference.QuadPart) / d->mPerformanceFrequency.QuadPart; + } + else + { + return (GetTickCount() - d->mStart) / 1000.0; + } } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/TrackerFeatures.cpp b/ar_track_alvar/src/TrackerFeatures.cpp index 9ddcf1a..a0f9dc4 100644 --- a/ar_track_alvar/src/TrackerFeatures.cpp +++ b/ar_track_alvar/src/TrackerFeatures.cpp @@ -26,236 +26,343 @@ using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -TrackerFeatures::TrackerFeatures(int _max_features, int _min_features, double _quality_level, double _min_distance, int _pyr_levels, int _win_size) : - x_res(0), y_res(0), frame_count(0), quality_level(0), min_distance(0), min_features(0), max_features(0), status(0), - img_eig(0), img_tmp(0), gray(0), prev_gray(0), pyramid(0), prev_pyramid(0), mask(0), next_id(0), win_size(0), pyr_levels(0), - prev_features(0), features(0), prev_feature_count(0), feature_count(0), prev_ids(0), ids(0) +TrackerFeatures::TrackerFeatures(int _max_features, int _min_features, + double _quality_level, double _min_distance, + int _pyr_levels, int _win_size) + : x_res(0) + , y_res(0) + , frame_count(0) + , quality_level(0) + , min_distance(0) + , min_features(0) + , max_features(0) + , status(0) + , img_eig(0) + , img_tmp(0) + , gray(0) + , prev_gray(0) + , pyramid(0) + , prev_pyramid(0) + , mask(0) + , next_id(0) + , win_size(0) + , pyr_levels(0) + , prev_features(0) + , features(0) + , prev_feature_count(0) + , feature_count(0) + , prev_ids(0) + , ids(0) { - next_id=1; // When this should be reset? - pyr_levels = _pyr_levels; - win_size = _win_size; - ChangeSettings(_max_features, _min_features, _quality_level, _min_distance); + next_id = 1; // When this should be reset? 
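+ // New feature ids are drawn from this counter; AddFeatures() wraps it at
+ // 0x7fff, so ids stay positive for the lifetime of the tracker.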
+ pyr_levels = _pyr_levels; + win_size = _win_size; + ChangeSettings(_max_features, _min_features, _quality_level, _min_distance); } -TrackerFeatures::~TrackerFeatures() { - if (status) delete [] status; - if (prev_features) delete [] prev_features; - if (features) delete [] features; - if (prev_ids) delete [] prev_ids; - if (ids) delete [] ids; - if (img_eig) cvReleaseImage(&img_eig); - if (img_tmp) cvReleaseImage(&img_tmp); - if (gray) cvReleaseImage(&gray); - if (prev_gray) cvReleaseImage(&prev_gray); - if (pyramid) cvReleaseImage(&pyramid); - if (prev_pyramid) cvReleaseImage(&prev_pyramid); - if (mask) cvReleaseImage(&mask); +TrackerFeatures::~TrackerFeatures() +{ + if (status) + delete[] status; + if (prev_features) + delete[] prev_features; + if (features) + delete[] features; + if (prev_ids) + delete[] prev_ids; + if (ids) + delete[] ids; + if (img_eig) + cvReleaseImage(&img_eig); + if (img_tmp) + cvReleaseImage(&img_tmp); + if (gray) + cvReleaseImage(&gray); + if (prev_gray) + cvReleaseImage(&prev_gray); + if (pyramid) + cvReleaseImage(&pyramid); + if (prev_pyramid) + cvReleaseImage(&prev_pyramid); + if (mask) + cvReleaseImage(&mask); } -void TrackerFeatures::ChangeSettings(int _max_features, int _min_features, double _quality_level, double _min_distance) { - - if(_max_features==max_features && _min_features==min_features && _quality_level==quality_level && _min_distance==min_distance) return; +void TrackerFeatures::ChangeSettings(int _max_features, int _min_features, + double _quality_level, + double _min_distance) +{ + if (_max_features == max_features && _min_features == min_features && + _quality_level == quality_level && _min_distance == min_distance) + return; - int common_features = min(feature_count, _max_features); - max_features = _max_features; - min_features = _min_features; - quality_level = _quality_level; - min_distance = _min_distance; - if (status) delete [] status; status = NULL; - if (prev_ids) delete [] prev_ids; prev_ids = NULL; - if (prev_features) delete [] prev_features; prev_features = NULL; - if (ids) { - int *ids_new = new int[max_features]; - assert(common_features < max_features); - memcpy(ids_new, ids, sizeof(int)*common_features); - delete [] ids; - ids = ids_new; - } else { - ids = new int[max_features]; - } - if (features) { - CvPoint2D32f *features_new = new CvPoint2D32f[max_features]; - memcpy(features_new, features, sizeof(CvPoint2D32f)*common_features); - delete [] features; - features = features_new; - } else { - features = new CvPoint2D32f[max_features]; - } - status = new char[max_features]; - prev_ids = new int[max_features]; - prev_features = new CvPoint2D32f[max_features]; - prev_feature_count=0; - feature_count=common_features; + int common_features = min(feature_count, _max_features); + max_features = _max_features; + min_features = _min_features; + quality_level = _quality_level; + min_distance = _min_distance; + if (status) + delete[] status; + status = NULL; + if (prev_ids) + delete[] prev_ids; + prev_ids = NULL; + if (prev_features) + delete[] prev_features; + prev_features = NULL; + if (ids) + { + int* ids_new = new int[max_features]; + assert(common_features < max_features); + memcpy(ids_new, ids, sizeof(int) * common_features); + delete[] ids; + ids = ids_new; + } + else + { + ids = new int[max_features]; + } + if (features) + { + CvPoint2D32f* features_new = new CvPoint2D32f[max_features]; + memcpy(features_new, features, sizeof(CvPoint2D32f) * common_features); + delete[] features; + features = features_new; + } + else + { + 
features = new CvPoint2D32f[max_features]; + } + status = new char[max_features]; + prev_ids = new int[max_features]; + prev_features = new CvPoint2D32f[max_features]; + prev_feature_count = 0; + feature_count = common_features; - assert(ids); - assert(features); - assert(status); - assert(prev_ids); - assert(prev_features); + assert(ids); + assert(features); + assert(status); + assert(prev_ids); + assert(prev_features); } -void TrackerFeatures::Reset() { - feature_count=0; - frame_count=0; +void TrackerFeatures::Reset() +{ + feature_count = 0; + frame_count = 0; } -bool TrackerFeatures::DelFeature(int index) { - if (index > feature_count) return false; - feature_count--; - for (int i=index; i feature_count) + return false; + feature_count--; + for (int i = index; i < feature_count; i++) + { + features[i] = features[i + 1]; + ids[i] = ids[i + 1]; + } + return true; } -bool TrackerFeatures::DelFeatureId(int id) { - for (int i=0; iwidth) || (y_res != img->height)) { - if (img_eig) cvReleaseImage(&img_eig); - if (img_tmp) cvReleaseImage(&img_tmp); - if (gray) cvReleaseImage(&gray); - if (prev_gray) cvReleaseImage(&prev_gray); - if (pyramid) cvReleaseImage(&pyramid); - if (prev_pyramid) cvReleaseImage(&prev_pyramid); - if (mask) cvReleaseImage(&mask); - x_res = img->width; y_res = img->height; - img_eig = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1); - img_tmp = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1); - gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); - prev_gray = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U, 1); - pyramid = cvCreateImage(cvSize(img->width+8,img->height/3), IPL_DEPTH_8U, 1); - prev_pyramid = cvCreateImage(cvSize(img->width+8,img->height/3), IPL_DEPTH_8U, 1); - mask = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); - frame_count=0; - if (min_distance == 0) { - min_distance = std::sqrt(double(img->width*img->height/max_features)); - min_distance *= 0.8; //(double(min_features)/max_features); - } - } - // Swap - IplImage* tmp; - CvPoint2D32f* tmp2; - CV_SWAP(prev_gray, gray, tmp); - CV_SWAP(prev_features, features, tmp2); - prev_feature_count=feature_count; - memcpy(prev_ids, ids, sizeof(int)*max_features); - if (img->nChannels == 1) { - cvCopy(img, gray); - } else { - cvCvtColor(img, gray, CV_RGB2GRAY); - } - // TODO: We used to add features here - //if (prev_feature_count < 1) return -1; - frame_count++; - if (frame_count <= 1) { - memcpy(features, prev_features, sizeof(CvPoint2D32f)*prev_feature_count); - memcpy(ids, prev_ids, sizeof(int)*prev_feature_count); - feature_count = prev_feature_count; - } else if (prev_feature_count > 0) { - // Track - cvCalcOpticalFlowPyrLK(prev_gray, gray, prev_pyramid, pyramid, - prev_features, features, prev_feature_count, cvSize(win_size, win_size), pyr_levels, status, 0, - cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), 0); - feature_count=0; - for (int i=0; iwidth) || (y_res != img->height)) + { + if (img_eig) + cvReleaseImage(&img_eig); + if (img_tmp) + cvReleaseImage(&img_tmp); + if (gray) + cvReleaseImage(&gray); + if (prev_gray) + cvReleaseImage(&prev_gray); + if (pyramid) + cvReleaseImage(&pyramid); + if (prev_pyramid) + cvReleaseImage(&prev_pyramid); + if (mask) + cvReleaseImage(&mask); + x_res = img->width; + y_res = img->height; + img_eig = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1); + img_tmp = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1); + gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); + prev_gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); + pyramid = + 
cvCreateImage(cvSize(img->width + 8, img->height / 3), IPL_DEPTH_8U, 1); + prev_pyramid = + cvCreateImage(cvSize(img->width + 8, img->height / 3), IPL_DEPTH_8U, 1); + mask = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); + frame_count = 0; + if (min_distance == 0) + { + min_distance = std::sqrt(double(img->width * img->height / max_features)); + min_distance *= 0.8; //(double(min_features)/max_features); + } + } + // Swap + IplImage* tmp; + CvPoint2D32f* tmp2; + CV_SWAP(prev_gray, gray, tmp); + CV_SWAP(prev_features, features, tmp2); + prev_feature_count = feature_count; + memcpy(prev_ids, ids, sizeof(int) * max_features); + if (img->nChannels == 1) + { + cvCopy(img, gray); + } + else + { + cvCvtColor(img, gray, CV_RGB2GRAY); + } + // TODO: We used to add features here + // if (prev_feature_count < 1) return -1; + frame_count++; + if (frame_count <= 1) + { + memcpy(features, prev_features, sizeof(CvPoint2D32f) * prev_feature_count); + memcpy(ids, prev_ids, sizeof(int) * prev_feature_count); + feature_count = prev_feature_count; + } + else if (prev_feature_count > 0) + { + // Track + cvCalcOpticalFlowPyrLK( + prev_gray, gray, prev_pyramid, pyramid, prev_features, features, + prev_feature_count, cvSize(win_size, win_size), pyr_levels, status, 0, + cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03), 0); + feature_count = 0; + for (int i = 0; i < prev_feature_count; i++) + { + if (!status[i]) + continue; + features[feature_count] = features[i]; + ids[feature_count] = prev_ids[i]; + feature_count++; + } + } - if (add_features) AddFeatures(new_features_mask); + if (add_features) + AddFeatures(new_features_mask); - return 1; + return 1; } -double TrackerFeatures::Reset(IplImage *img, IplImage *new_features_mask) { - feature_count=0; - frame_count=0; - return TrackHid(img, new_features_mask); +double TrackerFeatures::Reset(IplImage* img, IplImage* new_features_mask) +{ + feature_count = 0; + frame_count = 0; + return TrackHid(img, new_features_mask); } -double TrackerFeatures::Track(IplImage *img, bool add_features) { - return TrackHid(img, NULL); //, add_features); +double TrackerFeatures::Track(IplImage* img, bool add_features) +{ + return TrackHid(img, NULL); //, add_features); } double TrackerFeatures::Track(IplImage* img, IplImage* mask) { - return TrackHid(img, mask); //, true); + return TrackHid(img, mask); //, true); } -IplImage *TrackerFeatures::NewFeatureMask() { - cvSet(mask, cvScalar(255)); - for (int i=0; i= 1) { - for (int i=feature_count; i= 1) + { + for (int i = feature_count; i < feature_count + new_feature_count; i++) + { + ids[i] = next_id; + next_id = ((next_id + 1) % (0x7fff)); + } + feature_count += new_feature_count; + } + } + return feature_count; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/TrackerOrientation.cpp b/ar_track_alvar/src/TrackerOrientation.cpp index 1ed352f..a058d1a 100644 --- a/ar_track_alvar/src/TrackerOrientation.cpp +++ b/ar_track_alvar/src/TrackerOrientation.cpp @@ -26,221 +26,243 @@ using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -void TrackerOrientation::Project(CvMat* state, CvMat* projection, void *param) +void TrackerOrientation::Project(CvMat* state, CvMat* projection, void* param) { - TrackerOrientation *tracker = (TrackerOrientation*)param; - int count = projection->rows; - CvMat rot_mat = cvMat(3, 1, CV_64F, &(state->data.db[0+0])); - double zeros[3] = {0}; - CvMat zero_tra = cvMat(3, 1, CV_64F, zeros); - cvReshape(projection, projection, 2, 1); - 
cvProjectPoints2(tracker->_object_model, &rot_mat, &zero_tra, &(tracker->_camera->calib_K), &(tracker->_camera->calib_D), projection); - cvReshape(projection, projection, 1, count); + TrackerOrientation* tracker = (TrackerOrientation*)param; + int count = projection->rows; + CvMat rot_mat = cvMat(3, 1, CV_64F, &(state->data.db[0 + 0])); + double zeros[3] = { 0 }; + CvMat zero_tra = cvMat(3, 1, CV_64F, zeros); + cvReshape(projection, projection, 2, 1); + cvProjectPoints2(tracker->_object_model, &rot_mat, &zero_tra, + &(tracker->_camera->calib_K), &(tracker->_camera->calib_D), + projection); + cvReshape(projection, projection, 1, count); } void TrackerOrientation::Reset() -{ - _pose.Reset(); - _pose.Mirror(false, true, true); +{ + _pose.Reset(); + _pose.Mirror(false, true, true); } // Pose difference is stored... -double TrackerOrientation::Track(IplImage *image) +double TrackerOrientation::Track(IplImage* image) { - UpdateRotationOnly(image, 0); - return 0; + UpdateRotationOnly(image, 0); + return 0; } -bool TrackerOrientation::UpdatePose(IplImage *image) +bool TrackerOrientation::UpdatePose(IplImage* image) { - int count_points = _F_v.size(); - if(count_points < 6) return false; - - CvMat* _M = cvCreateMat(count_points, 1, CV_64FC3); - CvMat* image_observations = cvCreateMat(count_points*2, 1, CV_64F); // [u v u v u v ...]' - - //map::iterator it; - int ind = 0; - for(map::iterator it=_F_v.begin(); it!=_F_v.end(); it++) - { - if((it->second.status3D == Feature::USE_FOR_POSE || - it->second.status3D == Feature::IS_INITIAL) && - it->second.status2D == Feature::IS_TRACKED) - { - _M->data.db[ind*3+0] = it->second.point3d.x; - _M->data.db[ind*3+1] = it->second.point3d.y; - _M->data.db[ind*3+2] = it->second.point3d.z; - - image_observations->data.db[ind*2+0] = it->second.point.x; - image_observations->data.db[ind*2+1] = it->second.point.y; - ind++; - } - } - - if(ind < 6) - { - cvReleaseMat(&image_observations); - cvReleaseMat(&_M); - return false; - } - - double rot[3]; CvMat rotm = cvMat(3, 1, CV_64F, rot); - _pose.GetRodriques(&rotm); - - CvMat* par = cvCreateMat(3, 1, CV_64F); - memcpy(&(par->data.db[0+0]), rot, 3*sizeof(double)); - //par->data.db[3] = 0; - - CvRect r; r.x = 0; r.y = 0; r.height = ind; r.width = 1; - CvMat Msub; - cvGetSubRect(_M, &Msub, r); - _object_model = &Msub; - - r.height = 2*ind; - CvMat image_observations_sub; - cvGetSubRect(image_observations, &image_observations_sub, r); - - alvar::Optimization *opt = new alvar::Optimization(3, 2*ind); - - double foo = opt->Optimize(par, &image_observations_sub, 0.0005, 5, Project, this, alvar::Optimization::TUKEY_LM); - memcpy(rot, &(par->data.db[0+0]), 3*sizeof(double)); - _pose.SetRodriques(&rotm); - - delete opt; - - cvReleaseMat(&par); - cvReleaseMat(&image_observations); - cvReleaseMat(&_M); - - return true; + int count_points = _F_v.size(); + if (count_points < 6) + return false; + + CvMat* _M = cvCreateMat(count_points, 1, CV_64FC3); + CvMat* image_observations = + cvCreateMat(count_points * 2, 1, CV_64F); // [u v u v u v ...]' + + // map::iterator it; + int ind = 0; + for (map::iterator it = _F_v.begin(); it != _F_v.end(); it++) + { + if ((it->second.status3D == Feature::USE_FOR_POSE || + it->second.status3D == Feature::IS_INITIAL) && + it->second.status2D == Feature::IS_TRACKED) + { + _M->data.db[ind * 3 + 0] = it->second.point3d.x; + _M->data.db[ind * 3 + 1] = it->second.point3d.y; + _M->data.db[ind * 3 + 2] = it->second.point3d.z; + + image_observations->data.db[ind * 2 + 0] = it->second.point.x; + 
image_observations->data.db[ind * 2 + 1] = it->second.point.y; + ind++; + } + } + + if (ind < 6) + { + cvReleaseMat(&image_observations); + cvReleaseMat(&_M); + return false; + } + + double rot[3]; + CvMat rotm = cvMat(3, 1, CV_64F, rot); + _pose.GetRodriques(&rotm); + + CvMat* par = cvCreateMat(3, 1, CV_64F); + memcpy(&(par->data.db[0 + 0]), rot, 3 * sizeof(double)); + // par->data.db[3] = 0; + + CvRect r; + r.x = 0; + r.y = 0; + r.height = ind; + r.width = 1; + CvMat Msub; + cvGetSubRect(_M, &Msub, r); + _object_model = &Msub; + + r.height = 2 * ind; + CvMat image_observations_sub; + cvGetSubRect(image_observations, &image_observations_sub, r); + + alvar::Optimization* opt = new alvar::Optimization(3, 2 * ind); + + double foo = opt->Optimize(par, &image_observations_sub, 0.0005, 5, Project, + this, alvar::Optimization::TUKEY_LM); + memcpy(rot, &(par->data.db[0 + 0]), 3 * sizeof(double)); + _pose.SetRodriques(&rotm); + + delete opt; + + cvReleaseMat(&par); + cvReleaseMat(&image_observations); + cvReleaseMat(&_M); + + return true; } -bool TrackerOrientation::UpdateRotationOnly(IplImage *gray, IplImage *image) +bool TrackerOrientation::UpdateRotationOnly(IplImage* gray, IplImage* image) { - if(gray->nChannels != 1) return false; - if(!_grsc) _grsc = cvCreateImage(cvSize(_xres, _yres), 8, 1); - if((_xres!=_grsc->width)||(_yres!=_grsc->height)) - cvResize(gray, _grsc); - else - _grsc->imageData = gray->imageData; - - map::iterator it ; - for(it = _F_v.begin(); it != _F_v.end(); it++) - it->second.status2D = Feature::NOT_TRACKED; - - // Track features in image domain (distorted points) - _ft.Track(_grsc); - - // Go through image features and match to previous (_F_v) - for (int i = 0; i < _ft.feature_count; i++) - { - int id = _ft.ids[i]; - _F_v[id].point = _ft.features[i]; - _F_v[id].point.x *= _image_scale; - _F_v[id].point.y *= _image_scale; - _F_v[id].status2D = Feature::IS_TRACKED; - - // Throw outlier away - if(_F_v[id].status3D == Feature::IS_OUTLIER) - { - _ft.DelFeature(i); - } - } - - // Delete features that are not tracked -// map::iterator - it = _F_v.begin(); - while(it != _F_v.end()) - { - if(it->second.status2D == Feature::NOT_TRACKED) - _F_v.erase(it++); - else ++it; - } - - // Update pose based on current information - UpdatePose(); - - it = _F_v.begin(); - while(it != _F_v.end()) - { - Feature *f = &(it->second); - - // Add new points - if(f->status3D == Feature::NONE) - { - double wx, wy, wz; - - CvPoint2D32f fpu = f->point; - _camera->Undistort(fpu); - - // Tassa asetetaan z = inf ja lasketaan x ja y jotenkin?!? - int object_scale = 1; // TODO Same as the pose?!?!?!? - - // inv(K)*[u v 1]'*scale - wx = object_scale*(fpu.x-_camera->calib_K_data[0][2])/_camera->calib_K_data[0][0]; - wy = object_scale*(fpu.y-_camera->calib_K_data[1][2])/_camera->calib_K_data[1][1]; - wz = object_scale; - - // Now the points are in camera coordinate frame. 
- alvar::Pose p = _pose; - p.Invert(); - - double Xd[4] = {wx, wy, wz, 1}; - CvMat Xdm = cvMat(4, 1, CV_64F, Xd); - double Pd[16]; - CvMat Pdm = cvMat(4, 4, CV_64F, Pd); - p.GetMatrix(&Pdm); - cvMatMul(&Pdm, &Xdm, &Xdm); - f->point3d.x = Xd[0]/Xd[3]; - f->point3d.y = Xd[1]/Xd[3]; - f->point3d.z = Xd[2]/Xd[3]; - //cout<point3d.z<status3D = Feature::USE_FOR_POSE; - } - - if(image) - { - if(f->status3D == Feature::NONE) - cvCircle(image, cvPoint(int(f->point.x), int(f->point.y)), 3, CV_RGB(255,0,0), 1); - else if(f->status3D == Feature::USE_FOR_POSE) - cvCircle(image, cvPoint(int(f->point.x), int(f->point.y)), 3, CV_RGB(0,255,0), 1); - else if(f->status3D == Feature::IS_INITIAL) - cvCircle(image, cvPoint(int(f->point.x), int(f->point.y)), 3, CV_RGB(0,0,255), 1); - else if(f->status3D == Feature::IS_OUTLIER) - cvCircle(image, cvPoint(int(f->point.x), int(f->point.y)), 2, CV_RGB(255,0,255), 1); - } - - // Delete points that bk error is too big - // OK here just change state... - // NYT TEHAAN TURHAAN ASKEN MUKAAN OTETUILLE.. - if( f->status3D == Feature::USE_FOR_POSE || - f->status3D == Feature::IS_INITIAL ) - { - double p3d[3] = {f->point3d.x, f->point3d.y, f->point3d.z}; - CvMat p3dm = cvMat(1, 1, CV_64FC3, p3d); - double p2d[2]; - CvMat p2dm = cvMat(2, 1, CV_64F, p2d); - cvReshape(&p2dm, &p2dm, 2, 1); - - double gl_mat[16]; - _pose.GetMatrixGL(gl_mat); - _camera->ProjectPoints(&p3dm, gl_mat, &p2dm); - - if(image) - cvLine(image, cvPoint(int(p2d[0]), int(p2d[1])), cvPoint(int(f->point.x),int(f->point.y)), CV_RGB(255,0,255)); - - double dist = (p2d[0]-f->point.x)*(p2d[0]-f->point.x)+(p2d[1]-f->point.y)*(p2d[1]-f->point.y); - if(dist > _outlier_limit) - f->status3D = Feature::IS_OUTLIER; - } - - it++; - } - return true; + if (gray->nChannels != 1) + return false; + if (!_grsc) + _grsc = cvCreateImage(cvSize(_xres, _yres), 8, 1); + if ((_xres != _grsc->width) || (_yres != _grsc->height)) + cvResize(gray, _grsc); + else + _grsc->imageData = gray->imageData; + + map::iterator it; + for (it = _F_v.begin(); it != _F_v.end(); it++) + it->second.status2D = Feature::NOT_TRACKED; + + // Track features in image domain (distorted points) + _ft.Track(_grsc); + + // Go through image features and match to previous (_F_v) + for (int i = 0; i < _ft.feature_count; i++) + { + int id = _ft.ids[i]; + _F_v[id].point = _ft.features[i]; + _F_v[id].point.x *= _image_scale; + _F_v[id].point.y *= _image_scale; + _F_v[id].status2D = Feature::IS_TRACKED; + + // Throw outlier away + if (_F_v[id].status3D == Feature::IS_OUTLIER) + { + _ft.DelFeature(i); + } + } + + // Delete features that are not tracked + // map::iterator + it = _F_v.begin(); + while (it != _F_v.end()) + { + if (it->second.status2D == Feature::NOT_TRACKED) + _F_v.erase(it++); + else + ++it; + } + + // Update pose based on current information + UpdatePose(); + + it = _F_v.begin(); + while (it != _F_v.end()) + { + Feature* f = &(it->second); + + // Add new points + if (f->status3D == Feature::NONE) + { + double wx, wy, wz; + + CvPoint2D32f fpu = f->point; + _camera->Undistort(fpu); + + // Tassa asetetaan z = inf ja lasketaan x ja y jotenkin?!? + int object_scale = 1; // TODO Same as the pose?!?!?!? + + // inv(K)*[u v 1]'*scale + wx = object_scale * (fpu.x - _camera->calib_K_data[0][2]) / + _camera->calib_K_data[0][0]; + wy = object_scale * (fpu.y - _camera->calib_K_data[1][2]) / + _camera->calib_K_data[1][1]; + wz = object_scale; + + // Now the points are in camera coordinate frame. 
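+ // Transform them into marker (world) coordinates by applying the
+ // inverted camera pose to the homogeneous point.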
+ alvar::Pose p = _pose; + p.Invert(); + + double Xd[4] = { wx, wy, wz, 1 }; + CvMat Xdm = cvMat(4, 1, CV_64F, Xd); + double Pd[16]; + CvMat Pdm = cvMat(4, 4, CV_64F, Pd); + p.GetMatrix(&Pdm); + cvMatMul(&Pdm, &Xdm, &Xdm); + f->point3d.x = Xd[0] / Xd[3]; + f->point3d.y = Xd[1] / Xd[3]; + f->point3d.z = Xd[2] / Xd[3]; + // cout<point3d.z<status3D = Feature::USE_FOR_POSE; + } + + if (image) + { + if (f->status3D == Feature::NONE) + cvCircle(image, cvPoint(int(f->point.x), int(f->point.y)), 3, + CV_RGB(255, 0, 0), 1); + else if (f->status3D == Feature::USE_FOR_POSE) + cvCircle(image, cvPoint(int(f->point.x), int(f->point.y)), 3, + CV_RGB(0, 255, 0), 1); + else if (f->status3D == Feature::IS_INITIAL) + cvCircle(image, cvPoint(int(f->point.x), int(f->point.y)), 3, + CV_RGB(0, 0, 255), 1); + else if (f->status3D == Feature::IS_OUTLIER) + cvCircle(image, cvPoint(int(f->point.x), int(f->point.y)), 2, + CV_RGB(255, 0, 255), 1); + } + + // Delete points that bk error is too big + // OK here just change state... + // NYT TEHAAN TURHAAN ASKEN MUKAAN OTETUILLE.. + if (f->status3D == Feature::USE_FOR_POSE || + f->status3D == Feature::IS_INITIAL) + { + double p3d[3] = { f->point3d.x, f->point3d.y, f->point3d.z }; + CvMat p3dm = cvMat(1, 1, CV_64FC3, p3d); + double p2d[2]; + CvMat p2dm = cvMat(2, 1, CV_64F, p2d); + cvReshape(&p2dm, &p2dm, 2, 1); + + double gl_mat[16]; + _pose.GetMatrixGL(gl_mat); + _camera->ProjectPoints(&p3dm, gl_mat, &p2dm); + + if (image) + cvLine(image, cvPoint(int(p2d[0]), int(p2d[1])), + cvPoint(int(f->point.x), int(f->point.y)), CV_RGB(255, 0, 255)); + + double dist = (p2d[0] - f->point.x) * (p2d[0] - f->point.x) + + (p2d[1] - f->point.y) * (p2d[1] - f->point.y); + if (dist > _outlier_limit) + f->status3D = Feature::IS_OUTLIER; + } + + it++; + } + return true; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/TrackerPsa.cpp b/ar_track_alvar/src/TrackerPsa.cpp index 831d404..c43ea86 100644 --- a/ar_track_alvar/src/TrackerPsa.cpp +++ b/ar_track_alvar/src/TrackerPsa.cpp @@ -25,213 +25,269 @@ using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -TrackerPsa::TrackerPsa(int _max_shift) { - max_shift = _max_shift; - x_res = 0; y_res = 0; - hor = 0; horprev = 0; - ver = 0; verprev = 0; - framecount = 0; +TrackerPsa::TrackerPsa(int _max_shift) +{ + max_shift = _max_shift; + x_res = 0; + y_res = 0; + hor = 0; + horprev = 0; + ver = 0; + verprev = 0; + framecount = 0; } -TrackerPsa::~TrackerPsa() { - if (hor) delete [] hor; - if (horprev) delete [] horprev; - if (ver) delete [] ver; - if (verprev) delete [] verprev; +TrackerPsa::~TrackerPsa() +{ + if (hor) + delete[] hor; + if (horprev) + delete[] horprev; + if (ver) + delete[] ver; + if (verprev) + delete[] verprev; } -double TrackerPsa::Track(IplImage *img) { - long best_x_factor=99999999; - long best_y_factor=99999999; - long worst_x_factor=0; - long worst_y_factor=0; - double quality=0; - - //std::cout<<"Moi1"<width) || (y_res != img->height)) { - x_res = img->width; - y_res = img->height; - if (hor) delete [] hor; - if (horprev) delete [] horprev; - if (ver) delete [] ver; - if (verprev) delete [] verprev; - hor = new long [x_res]; - horprev = new long [x_res]; - ver = new long [y_res]; - verprev = new long [y_res]; - framecount=0; - } - framecount++; - - // Make shift tables - memset(hor, 0, sizeof(long)*x_res); - memset(ver, 0, sizeof(long)*y_res); - for (int y=0; y0 ? 
s=-s : s=-s+1) ) { - long factor=0; - long factorfirst=0; - int count=0; - for (int x=0; x<x_res; x++) { - int x2 = x+s; - if ((x2 > 0) && (x2<x_res)) { - factor += labs(hor[x2]-horprev[x]); - count++; - } - } - factor /= count; - if (factor < best_x_factor) { - best_x_factor=factor; - xd=s; - } - if (factor > worst_x_factor) worst_x_factor=factor; - } - for (int s=0; s<max_shift; (s>0 ? s=-s : s=-s+1)) { - long factor=0; - int count=0; - for (int y=0; y<y_res; y++) { - int y2 = y+s; - if ((y2 > 0) && (y2<y_res)) { - factor += labs(ver[y2]-verprev[y]); - count++; - } - } - factor /= count; - if (factor < best_y_factor) { - best_y_factor=factor; - yd=s; - } - if (factor > worst_y_factor) worst_y_factor=factor; - } - // Figure out the quality? - // We assume the result is poor if the - // worst factor is very near the best factor - int qual_x = (worst_x_factor - best_x_factor)/y_res; - int qual_y = (worst_y_factor - best_y_factor)/x_res; - quality = std::min(qual_x, qual_y); - } - - // Swap memories - long *tmp; - tmp=hor; hor=horprev; horprev=tmp; - tmp=ver; ver=verprev; verprev=tmp; - - // Return confidence (bigger is better) - // Smaller than 10 is poor, and you almost certainly have - // problems with quality values less than 5. - //printf("%d\n", quality); - return quality; +double TrackerPsa::Track(IplImage* img) +{ + long best_x_factor = 99999999; + long best_y_factor = 99999999; + long worst_x_factor = 0; + long worst_y_factor = 0; + double quality = 0; + + // std::cout<<"Moi1"<<std::endl; + if ((x_res != img->width) || (y_res != img->height)) + { + x_res = img->width; + y_res = img->height; + if (hor) + delete[] hor; + if (horprev) + delete[] horprev; + if (ver) + delete[] ver; + if (verprev) + delete[] verprev; + hor = new long[x_res]; + horprev = new long[x_res]; + ver = new long[y_res]; + verprev = new long[y_res]; + framecount = 0; + } + framecount++; + + // Make shift tables + memset(hor, 0, sizeof(long) * x_res); + memset(ver, 0, sizeof(long) * y_res); + for (int y = 0; y < y_res; y++) + { + for (int x = 0; x < x_res; x++) + { + unsigned char c = (unsigned char)cvGet2D(img, y, x).val[0]; + hor[x] += c; + ver[y] += c; + } + } + + // If this is first frame -- no motion + if (framecount == 1) + { + xd = 0; + yd = 0; + } + + // Otherwise detect for motion + else + { + // Sequence for s: 0, 1, -1, 2, -2, ... -(max_shift-1), (max_shift-1) + for (int s = 0; s < max_shift; (s > 0 ? s = -s : s = -s + 1)) + { + long factor = 0; + long factorfirst = 0; + int count = 0; + for (int x = 0; x < x_res; x++) + { + int x2 = x + s; + if ((x2 > 0) && (x2 < x_res)) + { + factor += labs(hor[x2] - horprev[x]); + count++; + } + } + factor /= count; + if (factor < best_x_factor) + { + best_x_factor = factor; + xd = s; + } + if (factor > worst_x_factor) + worst_x_factor = factor; + } + for (int s = 0; s < max_shift; (s > 0 ? s = -s : s = -s + 1)) + { + long factor = 0; + int count = 0; + for (int y = 0; y < y_res; y++) + { + int y2 = y + s; + if ((y2 > 0) && (y2 < y_res)) + { + factor += labs(ver[y2] - verprev[y]); + count++; + } + } + factor /= count; + if (factor < best_y_factor) + { + best_y_factor = factor; + yd = s; + } + if (factor > worst_y_factor) + worst_y_factor = factor; + } + // Figure out the quality? + // We assume the result is poor if the + // worst factor is very near the best factor + int qual_x = (worst_x_factor - best_x_factor) / y_res; + int qual_y = (worst_y_factor - best_y_factor) / x_res; + quality = std::min(qual_x, qual_y); + } + + // Swap memories + long* tmp; + tmp = hor; + hor = horprev; + horprev = tmp; + tmp = ver; + ver = verprev; + verprev = tmp; + + // Return confidence (bigger is better) + // Smaller than 10 is poor, and you almost certainly have + // problems with quality values less than 5. 
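The quality value computed here is the only signal the caller gets about whether the phase-shift motion estimate is trustworthy. A minimal usage sketch (assuming the header is reachable as `ar_track_alvar/TrackerPsa.h` and that a capture loop supplies `frame`; `compensatePoint` is a hypothetical caller, and the threshold of 10 follows the comment above):

```
#include <ar_track_alvar/TrackerPsa.h>

// Hypothetical caller: estimate global image motion, then compensate a
// tracked point (px, py) only when the confidence is usable.
void compensatePoint(alvar::TrackerPsa& tracker, IplImage* frame,
                     double* px, double* py)
{
  double quality = tracker.Track(frame);
  if (quality < 10)
    return;                    // motion estimate unreliable; skip this frame
  tracker.Compensate(px, py);  // shifts (px, py) by the estimated (xd, yd)
}
```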
+ // printf("%d\n", quality); + return quality; } -void TrackerPsa::Compensate(double *x, double *y) { - *x += xd; *y += yd; +void TrackerPsa::Compensate(double* x, double* y) +{ + *x += xd; + *y += yd; } -TrackerPsaRot::TrackerPsaRot(int _max_shift) : TrackerPsa(_max_shift) { - rotd = 0; - rot=new double [360]; - rotprev=new double[360]; - rot_count=new int[360]; +TrackerPsaRot::TrackerPsaRot(int _max_shift) : TrackerPsa(_max_shift) +{ + rotd = 0; + rot = new double[360]; + rotprev = new double[360]; + rot_count = new int[360]; } -TrackerPsaRot::~TrackerPsaRot() { - if (rot) delete [] rot; - if (rotprev) delete [] rotprev; - if (rot_count) delete [] rot_count; +TrackerPsaRot::~TrackerPsaRot() +{ + if (rot) + delete[] rot; + if (rotprev) + delete[] rotprev; + if (rot_count) + delete[] rot_count; } -double TrackerPsaRot::Track(IplImage *img) { - long best_rot_factor=99999999; - double conf1 = TrackerPsa::Track(img); - - if (framecount == 1) { - rotd=0; - } - else { - memset(rot, 0, sizeof(double)*360); - memset(rot_count, 0, sizeof(int)*360); - for (int y=0; y<y_res; y++) { - for (int x=0; x<x_res; x++) { - double y2 = y - (y_res/2) - (yd); - double x2 = x - (x_res/2) - (xd); - double r = sqrt((double)y2*y2 + x2*x2); - int theta = int(atan2((double)y2, (double)x2)*180/3.14159265); - if (theta < 0) theta+=360; - if ((y >= 0) && (y < img->height) && - (x >= 0) && (x < img->width)) - { - rot[theta] += (unsigned char)cvGet2D(img, y, x).val[0]; - rot_count[theta] ++; - } - } - } - for (int i=0; i<360; i++) { - rot[i] /= rot_count[i]; - } - for (int s=0; s<45; (s>0 ? s=-s : s=-s+1)) { - long factor=0; - for (int theta=0; theta<360; theta++) { - int theta2 = theta+s; - if (theta2 < 0) theta2+=360; - if (theta2 >= 360) theta2-=360; - factor += (long)labs(long(rot[theta2] - rotprev[theta])); - } - if (factor < best_rot_factor) { - best_rot_factor=factor; - rotd=s; - } - } - } - // Remember rotation based on center - memset(rotprev, 0, sizeof(double)*360); - memset(rot_count, 0, sizeof(int)*360); - for (int y=0; y<y_res; y++) { - for (int x=0; x<x_res; x++) { - double y2 = y - (y_res/2); - double x2 = x - (x_res/2); - double r = sqrt((double)y2*y2 + x2*x2); - int theta = int(atan2((double)y2, (double)x2)*180/3.14159265); - if (theta < 0) theta+=360; - if ((y >= 0) && (y < img->height) && - (x >= 0) && (x < img->width)) - { - rotprev[theta] += (unsigned char)cvGet2D(img, y, x).val[0]; - rot_count[theta] ++; - } - } - } - for (int i=0; i<360; i++) { - rotprev[i] /= rot_count[i]; - } - return conf1 + best_rot_factor; +double TrackerPsaRot::Track(IplImage* img) +{ + long best_rot_factor = 99999999; + double conf1 = TrackerPsa::Track(img); + + if (framecount == 1) + { + rotd = 0; + } + else + { + memset(rot, 0, sizeof(double) * 360); + memset(rot_count, 0, sizeof(int) * 360); + for (int y = 0; y < y_res; y++) + { + for (int x = 0; x < x_res; x++) + { + double y2 = y - (y_res / 2) - (yd); + double x2 = x - (x_res / 2) - (xd); + double r = sqrt((double)y2 * y2 + x2 * x2); + int theta = int(atan2((double)y2, (double)x2) * 180 / 3.14159265); + if (theta < 0) + theta += 360; + if ((y >= 0) && (y < img->height) && (x >= 0) && (x < img->width)) + { + rot[theta] += (unsigned char)cvGet2D(img, y, x).val[0]; + rot_count[theta]++; + } + } + } + for (int i = 0; i < 360; i++) + { + rot[i] /= rot_count[i]; + } + for (int s = 0; s < 45; (s > 0 ? 
s = -s : s = -s + 1)) + { + long factor = 0; + for (int theta = 0; theta < 360; theta++) + { + int theta2 = theta + s; + if (theta2 < 0) + theta2 += 360; + if (theta2 >= 360) + theta2 -= 360; + factor += (long)labs(long(rot[theta2] - rotprev[theta])); + } + if (factor < best_rot_factor) + { + best_rot_factor = factor; + rotd = s; + } + } + } + // Remember rotation based on center + memset(rotprev, 0, sizeof(double) * 360); + memset(rot_count, 0, sizeof(int) * 360); + for (int y = 0; y < y_res; y++) + { + for (int x = 0; x < x_res; x++) + { + double y2 = y - (y_res / 2); + double x2 = x - (x_res / 2); + double r = sqrt((double)y2 * y2 + x2 * x2); + int theta = int(atan2((double)y2, (double)x2) * 180 / 3.14159265); + if (theta < 0) + theta += 360; + if ((y >= 0) && (y < img->height) && (x >= 0) && (x < img->width)) + { + rotprev[theta] += (unsigned char)cvGet2D(img, y, x).val[0]; + rot_count[theta]++; + } + } + } + for (int i = 0; i < 360; i++) + { + rotprev[i] /= rot_count[i]; + } + return conf1 + best_rot_factor; } -void TrackerPsaRot::Compensate(double *x, double *y) +void TrackerPsaRot::Compensate(double* x, double* y) { - double xx = *x - (x_res/2); - double yy = *y - (y_res/2); - double kosini = cos(rotd*3.1415926535/180); - double sini = sin(rotd*3.1415926535/180); - *x = ((kosini * xx) - (sini * yy)) + xd + (x_res/2); - *y = ((sini * xx) + (kosini * yy)) + yd + (y_res/2); + double xx = *x - (x_res / 2); + double yy = *y - (y_res / 2); + double kosini = cos(rotd * 3.1415926535 / 180); + double sini = sin(rotd * 3.1415926535 / 180); + *x = ((kosini * xx) - (sini * yy)) + xd + (x_res / 2); + *y = ((sini * xx) + (kosini * yy)) + yd + (y_res / 2); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/TrackerStat.cpp b/ar_track_alvar/src/TrackerStat.cpp index b5e3245..8525c0d 100644 --- a/ar_track_alvar/src/TrackerStat.cpp +++ b/ar_track_alvar/src/TrackerStat.cpp @@ -25,90 +25,114 @@ using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -TrackerStat::TrackerStat(int binsize) : f(100,90) { - hist.AddDimension(binsize); // x - hist.AddDimension(binsize); // y +TrackerStat::TrackerStat(int binsize) : f(100, 90) +{ + hist.AddDimension(binsize); // x + hist.AddDimension(binsize); // y } -void TrackerStat::Reset() { - f.Reset(); +void TrackerStat::Reset() +{ + f.Reset(); } -double TrackerStat::Track(IplImage *img) +double TrackerStat::Track(IplImage* img) { - if (img == NULL) return -1; - f.Track(img); - hist.Clear(); - for (int p=0; pwidth; - y_res = img->height; - hist_rot.Clear(); - for (int p=0; pwidth; + y_res = img->height; + hist_rot.Clear(); + for (int p = 0; p < f.prev_feature_count; p++) + { + for (int c = 0; c < f.feature_count; c++) + { + if (f.prev_ids[p] != f.ids[c]) + continue; + double x_pred = f.prev_features[p].x + xd; + double y_pred = f.prev_features[p].y + yd; + double x_curr = f.features[c].x; + double y_curr = f.features[c].y; + double x = x_curr - x_pred; + double y = y_curr - y_pred; + double theta_pred = + atan2((double)y_pred - (y_res / 2), (double)x_pred - (x_res / 2)) * + 180.0 / 3.1415926535; + double theta_curr = + atan2((double)y_curr - (y_res / 2), (double)x_curr - (x_res / 2)) * + 180.0 / 3.1415926535; + hist_rot.Inc(theta_curr - theta_pred); + } + } + rotd = 0; + hist_rot.GetMax(&rotd); + return ret; } -void TrackerStatRot::Compensate(double *x, double *y) +void TrackerStatRot::Compensate(double* x, double* y) { - double xx = *x - (x_res/2); - double yy = *y - (y_res/2); - double kosini = 
cos(rotd*3.1415926535/180); - double sini = sin(rotd*3.1415926535/180); - *x = ((kosini * xx) - (sini * yy)) + xd + (x_res/2); - *y = ((sini * xx) + (kosini * yy)) + yd + (y_res/2); + double xx = *x - (x_res / 2); + double yy = *y - (y_res / 2); + double kosini = cos(rotd * 3.1415926535 / 180); + double sini = sin(rotd * 3.1415926535 / 180); + *x = ((kosini * xx) - (sini * yy)) + xd + (x_res / 2); + *y = ((sini * xx) + (kosini * yy)) + yd + (y_res / 2); } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/TrifocalTensor.cpp b/ar_track_alvar/src/TrifocalTensor.cpp index 0deb495..0ffc995 100644 --- a/ar_track_alvar/src/TrifocalTensor.cpp +++ b/ar_track_alvar/src/TrifocalTensor.cpp @@ -23,181 +23,148 @@ #include "TrifocalTensor.h" -namespace alvar { - -TrifocalTensor::TrifocalTensor() { +namespace alvar +{ +TrifocalTensor::TrifocalTensor() +{ } -TrifocalTensor::TrifocalTensor(const Pose &p0, const Pose &p1) { +TrifocalTensor::TrifocalTensor(const Pose& p0, const Pose& p1) +{ computeTensor(p0, p1); } -TrifocalTensor::TrifocalTensor(const Pose &p0, const Pose &p1, const Pose &p2) { +TrifocalTensor::TrifocalTensor(const Pose& p0, const Pose& p1, const Pose& p2) +{ computeTensor(p0, p1, p2); } -TrifocalTensor::~TrifocalTensor() { +TrifocalTensor::~TrifocalTensor() +{ } -void TrifocalTensor::computeTensor(const Pose &p0, const Pose &p1) { +void TrifocalTensor::computeTensor(const Pose& p0, const Pose& p1) +{ double data_p12[4][4], data_p13[4][4]; - CvMat p12 = cvMat( 4, 4, CV_64F, data_p12 ); - CvMat p13 = cvMat( 4, 4, CV_64F, data_p13 ); + CvMat p12 = cvMat(4, 4, CV_64F, data_p12); + CvMat p13 = cvMat(4, 4, CV_64F, data_p13); p0.GetMatrix(&p12); p1.GetMatrix(&p13); for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) - for (int k = 0; k < 3; k++) { - - T[i][j][k] = - (data_p12[j][i] * data_p13[k][3]) - - - (data_p12[j][3] * data_p13[k][i]); - } + for (int k = 0; k < 3; k++) + { + T[i][j][k] = (data_p12[j][i] * data_p13[k][3]) - + (data_p12[j][3] * data_p13[k][i]); + } } -double *getRow(double* m, int row) { - return &m[4*row]; +double* getRow(double* m, int row) +{ + return &m[4 * row]; } -double det(double *r0, double *r1, double *r2, double *r3) { +double det(double* r0, double* r1, double* r2, double* r3) +{ double m[16]; - memcpy(&m[0], r0, 4*sizeof(double)); - memcpy(&m[4], r1, 4*sizeof(double)); - memcpy(&m[8], r2, 4*sizeof(double)); - memcpy(&m[12], r3, 4*sizeof(double)); + memcpy(&m[0], r0, 4 * sizeof(double)); + memcpy(&m[4], r1, 4 * sizeof(double)); + memcpy(&m[8], r2, 4 * sizeof(double)); + memcpy(&m[12], r3, 4 * sizeof(double)); CvMat M = cvMat(4, 4, CV_64F, m); return cvDet(&M); } -void TrifocalTensor::computeTensor(const Pose &p0, const Pose &p1, const Pose &p2) { +void TrifocalTensor::computeTensor(const Pose& p0, const Pose& p1, + const Pose& p2) +{ double data_p0[16], data_p1[16], data_p2[16]; - CvMat P0 = cvMat( 4, 4, CV_64F, data_p0 ); - CvMat P1 = cvMat( 4, 4, CV_64F, data_p1 ); - CvMat P2 = cvMat( 4, 4, CV_64F, data_p2 ); + CvMat P0 = cvMat(4, 4, CV_64F, data_p0); + CvMat P1 = cvMat(4, 4, CV_64F, data_p1); + CvMat P2 = cvMat(4, 4, CV_64F, data_p2); p0.GetMatrix(&P0); p1.GetMatrix(&P1); p2.GetMatrix(&P2); for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) - for (int k = 0; k < 3; k++) { - double sign = i==1 ? -1 : 1; - T[i][j][k] = sign * det(getRow(data_p0, (i+1)%3), - getRow(data_p0, (i+2)%3), - getRow(data_p1, j), - getRow(data_p2, k)); - } + for (int k = 0; k < 3; k++) + { + double sign = i == 1 ? 
-1 : 1; + T[i][j][k] = sign * det(getRow(data_p0, (i + 1) % 3), + getRow(data_p0, (i + 2) % 3), + getRow(data_p1, j), getRow(data_p2, k)); + } } -double TrifocalTensor::projectAxis(const CvPoint2D64f &p0, const CvPoint2D64f &p1, int l) { - double v00 = - p1.x * ( p0.x * T[0][0][l] - + p0.y * T[1][0][l] - + T[2][0][l]) - - - p1.x * ( p0.x * T[0][0][l] - + p0.y * T[1][0][l] - + T[2][0][l]); - double v01 = - p1.x * ( p0.x * T[0][1][l] - + p0.y * T[1][1][l] - + T[2][1][l]) - - - p1.y * ( p0.x * T[0][0][l] - + p0.y * T[1][0][l] - + T[2][0][l]); - double v02 = - p1.x * ( p0.x * T[0][2][l] - + p0.y * T[1][2][l] - + T[2][2][l]) - - ( p0.x * T[0][0][l] - + p0.y * T[1][0][l] - + T[2][0][l]); - - double v10 = - p1.y * ( p0.x * T[0][0][l] - + p0.y * T[1][0][l] - + T[2][0][l]) - - - p1.x * ( p0.x * T[0][1][l] - + p0.y * T[1][1][l] - + T[2][1][l]); - double v11 = - p1.y * ( p0.x * T[0][1][l] - + p0.y * T[1][1][l] - + T[2][1][l]) - - - p1.y * ( p0.x * T[0][1][l] - + p0.y * T[1][1][l] - + T[2][1][l]); - double v12 = - p1.y * ( p0.x * T[0][2][l] - + p0.y * T[1][2][l] - + T[2][2][l]) - - ( p0.x * T[0][1][l] - + p0.y * T[1][1][l] - + T[2][1][l]); - - double v20 = - ( p0.x * T[0][0][l] - + p0.y * T[1][0][l] - + T[2][0][l]) - - - p1.x * ( p0.x * T[0][2][l] - + p0.y * T[1][2][l] - + T[2][2][l]); - double v21 = - ( p0.x * T[0][1][l] - + p0.y * T[1][1][l] - + T[2][1][l]) - - - p1.y * ( p0.x * T[0][2][l] - + p0.y * T[1][2][l] - + T[2][2][l]); - - double v22 = - ( p0.x * T[0][2][l] - + p0.y * T[1][2][l] - + T[2][2][l]) - - - ( p0.x * T[0][2][l] - + p0.y * T[1][2][l] - + T[2][2][l]); - +double TrifocalTensor::projectAxis(const CvPoint2D64f& p0, + const CvPoint2D64f& p1, int l) +{ + double v00 = p1.x * (p0.x * T[0][0][l] + p0.y * T[1][0][l] + T[2][0][l]) - + p1.x * (p0.x * T[0][0][l] + p0.y * T[1][0][l] + T[2][0][l]); + double v01 = p1.x * (p0.x * T[0][1][l] + p0.y * T[1][1][l] + T[2][1][l]) - + p1.y * (p0.x * T[0][0][l] + p0.y * T[1][0][l] + T[2][0][l]); + double v02 = p1.x * (p0.x * T[0][2][l] + p0.y * T[1][2][l] + T[2][2][l]) - + (p0.x * T[0][0][l] + p0.y * T[1][0][l] + T[2][0][l]); + + double v10 = p1.y * (p0.x * T[0][0][l] + p0.y * T[1][0][l] + T[2][0][l]) - + p1.x * (p0.x * T[0][1][l] + p0.y * T[1][1][l] + T[2][1][l]); + double v11 = p1.y * (p0.x * T[0][1][l] + p0.y * T[1][1][l] + T[2][1][l]) - + p1.y * (p0.x * T[0][1][l] + p0.y * T[1][1][l] + T[2][1][l]); + double v12 = p1.y * (p0.x * T[0][2][l] + p0.y * T[1][2][l] + T[2][2][l]) - + (p0.x * T[0][1][l] + p0.y * T[1][1][l] + T[2][1][l]); + + double v20 = (p0.x * T[0][0][l] + p0.y * T[1][0][l] + T[2][0][l]) - + p1.x * (p0.x * T[0][2][l] + p0.y * T[1][2][l] + T[2][2][l]); + double v21 = (p0.x * T[0][1][l] + p0.y * T[1][1][l] + T[2][1][l]) - + p1.y * (p0.x * T[0][2][l] + p0.y * T[1][2][l] + T[2][2][l]); + + double v22 = (p0.x * T[0][2][l] + p0.y * T[1][2][l] + T[2][2][l]) - + (p0.x * T[0][2][l] + p0.y * T[1][2][l] + T[2][2][l]); + double v = 0; - if (fabs(v00) > fabs(v)) v = v00; - if (fabs(v01) > fabs(v)) v = v01; - if (fabs(v02) > fabs(v)) v = v02; - if (fabs(v10) > fabs(v)) v = v10; - if (fabs(v11) > fabs(v)) v = v11; - if (fabs(v12) > fabs(v)) v = v12; - if (fabs(v20) > fabs(v)) v = v20; - if (fabs(v21) > fabs(v)) v = v21; - if (fabs(v22) > fabs(v)) v = v22; + if (fabs(v00) > fabs(v)) + v = v00; + if (fabs(v01) > fabs(v)) + v = v01; + if (fabs(v02) > fabs(v)) + v = v02; + if (fabs(v10) > fabs(v)) + v = v10; + if (fabs(v11) > fabs(v)) + v = v11; + if (fabs(v12) > fabs(v)) + v = v12; + if (fabs(v20) > fabs(v)) + v = v20; + if (fabs(v21) > fabs(v)) + v = v21; + if 
(fabs(v22) > fabs(v)) + v = v22; return v; } -void TrifocalTensor::project(const CvPoint2D64f &p0, - const CvPoint2D64f &p1, - CvPoint2D64f &p2) { +void TrifocalTensor::project(const CvPoint2D64f& p0, const CvPoint2D64f& p1, + CvPoint2D64f& p2) +{ double z = projectAxis(p0, p1, 2); p2.x = projectAxis(p0, p1, 0) / z; p2.y = projectAxis(p0, p1, 1) / z; } -double TrifocalTensor::projectError(const CvPoint2D64f &p0, - const CvPoint2D64f &p1, - const CvPoint2D64f &p2) { +double TrifocalTensor::projectError(const CvPoint2D64f& p0, + const CvPoint2D64f& p1, + const CvPoint2D64f& p2) +{ double v0 = projectAxis(p0, p1, 0); double v1 = projectAxis(p0, p1, 1); double v2 = projectAxis(p0, p1, 2); - double e0 = v0/v2 - p2.x; - double e1 = v1/v2 - p2.y; - return e0*e0+e1*e1; + double e0 = v0 / v2 - p2.x; + double e1 = v1 / v2 - p2.y; + return e0 * e0 + e1 * e1; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/UnscentedKalman.cpp b/ar_track_alvar/src/UnscentedKalman.cpp index 00f6e59..45b9d69 100644 --- a/ar_track_alvar/src/UnscentedKalman.cpp +++ b/ar_track_alvar/src/UnscentedKalman.cpp @@ -25,50 +25,71 @@ #include #include -namespace alvar { - -UnscentedKalman::UnscentedKalman(int state_n, int obs_n, int state_k, double alpha, double beta) { +namespace alvar +{ +UnscentedKalman::UnscentedKalman(int state_n, int obs_n, int state_k, + double alpha, double beta) +{ state_k = 0; - //TODO: support a separate noise vector/covariance matrix: state_k; + // TODO: support a separate noise vector/covariance matrix: state_k; this->state_k = state_k; this->state_n = state_n; this->obs_n = obs_n; sigma_n = 2 * state_n + 1; double L = state_n + state_k; - lambda = alpha*alpha * L - L; - lambda2 = 1 - alpha*alpha + beta; - - state = cvCreateMat(state_n, 1, CV_64F); cvSetZero(state); - stateCovariance = cvCreateMat(state_n, state_n, CV_64F); cvSetZero(stateCovariance); - sqrtStateCovariance = cvCreateMat(state_n, state_n, CV_64F); cvSetZero(sqrtStateCovariance); - stateD = cvCreateMat(state_n, state_n, CV_64F); cvSetZero(stateD); - stateU = cvCreateMat(state_n, state_n, CV_64F); cvSetZero(stateU); - stateV = cvCreateMat(state_n, state_n, CV_64F); cvSetZero(stateV); - stateTmp = cvCreateMat(state_n, state_n, CV_64F); cvSetZero(stateTmp); - stateDiff = cvCreateMat(state_n, 1, CV_64F); cvSetZero(stateDiff); - - predObs = cvCreateMat(obs_n, 1, CV_64F); cvSetZero(predObs); - predObsCovariance = cvCreateMat(obs_n, obs_n, CV_64F); cvSetZero(predObsCovariance); - predObsDiff = cvCreateMat(obs_n, 1, CV_64F); cvSetZero(predObsDiff); - - invPredObsCovariance = cvCreateMat(obs_n, obs_n, CV_64F); cvSetZero(invPredObsCovariance); - statePredObsCrossCorrelation = cvCreateMat(state_n, obs_n, CV_64F); cvSetZero(statePredObsCrossCorrelation); - kalmanGain = cvCreateMat(state_n, obs_n, CV_64F); cvSetZero(kalmanGain); - kalmanTmp = cvCreateMat(state_n, obs_n, CV_64F); cvSetZero(kalmanTmp); + lambda = alpha * alpha * L - L; + lambda2 = 1 - alpha * alpha + beta; + + state = cvCreateMat(state_n, 1, CV_64F); + cvSetZero(state); + stateCovariance = cvCreateMat(state_n, state_n, CV_64F); + cvSetZero(stateCovariance); + sqrtStateCovariance = cvCreateMat(state_n, state_n, CV_64F); + cvSetZero(sqrtStateCovariance); + stateD = cvCreateMat(state_n, state_n, CV_64F); + cvSetZero(stateD); + stateU = cvCreateMat(state_n, state_n, CV_64F); + cvSetZero(stateU); + stateV = cvCreateMat(state_n, state_n, CV_64F); + cvSetZero(stateV); + stateTmp = cvCreateMat(state_n, state_n, CV_64F); + cvSetZero(stateTmp); + stateDiff = 
cvCreateMat(state_n, 1, CV_64F); + cvSetZero(stateDiff); + + predObs = cvCreateMat(obs_n, 1, CV_64F); + cvSetZero(predObs); + predObsCovariance = cvCreateMat(obs_n, obs_n, CV_64F); + cvSetZero(predObsCovariance); + predObsDiff = cvCreateMat(obs_n, 1, CV_64F); + cvSetZero(predObsDiff); + + invPredObsCovariance = cvCreateMat(obs_n, obs_n, CV_64F); + cvSetZero(invPredObsCovariance); + statePredObsCrossCorrelation = cvCreateMat(state_n, obs_n, CV_64F); + cvSetZero(statePredObsCrossCorrelation); + kalmanGain = cvCreateMat(state_n, obs_n, CV_64F); + cvSetZero(kalmanGain); + kalmanTmp = cvCreateMat(state_n, obs_n, CV_64F); + cvSetZero(kalmanTmp); sigma_state = new CvMat*[sigma_n]; sigma_predObs = new CvMat*[sigma_n]; - for (int i = 0; i < sigma_n; i++) { - sigma_state[i] = cvCreateMat(state_n, 1, CV_64F); cvSetZero(sigma_state[i]); - sigma_predObs[i] = cvCreateMat(obs_n, 1, CV_64F); cvSetZero(sigma_predObs[i]); + for (int i = 0; i < sigma_n; i++) + { + sigma_state[i] = cvCreateMat(state_n, 1, CV_64F); + cvSetZero(sigma_state[i]); + sigma_predObs[i] = cvCreateMat(obs_n, 1, CV_64F); + cvSetZero(sigma_predObs[i]); } sigmasUpdated = false; } -UnscentedKalman::~UnscentedKalman() { +UnscentedKalman::~UnscentedKalman() +{ cvReleaseMat(&state); cvReleaseMat(&stateCovariance); cvReleaseMat(&sqrtStateCovariance); @@ -86,7 +107,8 @@ UnscentedKalman::~UnscentedKalman() { cvReleaseMat(&predObsCovariance); cvReleaseMat(&predObsDiff); - for (int i = 0; i < sigma_n; i++) { + for (int i = 0; i < sigma_n; i++) + { cvReleaseMat(&sigma_state[i]); cvReleaseMat(&sigma_predObs[i]); } @@ -94,58 +116,57 @@ UnscentedKalman::~UnscentedKalman() { delete[] sigma_predObs; } -void UnscentedKalman::initialize() { +void UnscentedKalman::initialize() +{ // Computes new sigma points from current state estimate. // 1. Compute square root of state co-variance: //[E D] = eig(A); sqrtm(A) = E * sqrt(D) * E' where D is a diagonal matrix. - //sqrt(D) is formed by taking the square root of the diagonal entries in D. - #ifdef MYDEBUG - printf("stateCovariance:\n"); - for (int i = 0; i < 5; i++) - printf("%+.10f %+.10f %+.10f %+.10f %+.10f\n", - cvGetReal2D(stateCovariance, 0, i), - cvGetReal2D(stateCovariance, 1, i), - cvGetReal2D(stateCovariance, 2, i), - cvGetReal2D(stateCovariance, 3, i), - cvGetReal2D(stateCovariance, 4, i)); + // sqrt(D) is formed by taking the square root of the diagonal entries in D. 
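For reference, the same square-root-of-a-covariance trick can be written against the C++ OpenCV API (a sketch only; the file itself stays on the legacy CvMat calls). As the comments here note, for a symmetric positive semi-definite A, [U S V] = svd(A) gives sqrtm(A) = U * sqrt(S) * V':

```
#include <opencv2/core.hpp>

// Returns M with M * M^T == A, for a symmetric PSD matrix A of type CV_64F.
cv::Mat matrixSqrt(const cv::Mat& A)
{
  cv::SVD svd(A);          // A = u * diag(w) * vt
  cv::Mat sqrtW;
  cv::sqrt(svd.w, sqrtW);  // element-wise sqrt of the singular values
  return svd.u * cv::Mat::diag(sqrtW) * svd.vt;
}
```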
+#ifdef MYDEBUG + printf("stateCovariance:\n"); + for (int i = 0; i < 5; i++) + printf( + "%+.10f %+.10f %+.10f %+.10f %+.10f\n", + cvGetReal2D(stateCovariance, 0, i), cvGetReal2D(stateCovariance, 1, i), + cvGetReal2D(stateCovariance, 2, i), cvGetReal2D(stateCovariance, 3, i), + cvGetReal2D(stateCovariance, 4, i)); #endif - //Another equivilant way is to use: + // Another equivalent way is to use: // [U S V] = svd(A); sqrtm(A) = U * sqrt(S) * V' - cvSVD(stateCovariance, stateD, stateU, stateV); //, CV_SVD_V_T + cvSVD(stateCovariance, stateD, stateU, stateV); //, CV_SVD_V_T double L = state_n + state_k; double scale = L + lambda; - for (int i = 0; i < state_n; i++) { + for (int i = 0; i < state_n; i++) + { double d = cvGetReal2D(stateD, i, i); - cvSetReal2D(stateD, i, i, sqrt(scale*d)); + cvSetReal2D(stateD, i, i, sqrt(scale * d)); } cvGEMM(stateD, stateV, 1., NULL, 0, stateTmp, CV_GEMM_B_T); cvGEMM(stateU, stateTmp, 1., NULL, 0, sqrtStateCovariance); #ifdef MYDEBUG - printf("sqrtStateCovariance:\n"); - for (int i = 0; i < 5; i++) - printf("%+.10f %+.10f %+.10f %+.10f %+.10f\n", - cvGetReal2D(sqrtStateCovariance, 0, i), - cvGetReal2D(sqrtStateCovariance, 1, i), - cvGetReal2D(sqrtStateCovariance, 2, i), - cvGetReal2D(sqrtStateCovariance, 3, i), - cvGetReal2D(sqrtStateCovariance, 4, i)); - cvGEMM(sqrtStateCovariance, sqrtStateCovariance, 1., NULL, 0, stateTmp); - printf("sqrtStateCovariance^2:\n"); - for (int i = 0; i < 5; i++) - printf("%+.10f %+.10f %+.10f %+.10f %+.10f\n", - cvGetReal2D(stateTmp, 0, i), - cvGetReal2D(stateTmp, 1, i), - cvGetReal2D(stateTmp, 2, i), - cvGetReal2D(stateTmp, 3, i), - cvGetReal2D(stateTmp, 4, i)); + printf("sqrtStateCovariance:\n"); + for (int i = 0; i < 5; i++) + printf("%+.10f %+.10f %+.10f %+.10f %+.10f\n", + cvGetReal2D(sqrtStateCovariance, 0, i), + cvGetReal2D(sqrtStateCovariance, 1, i), + cvGetReal2D(sqrtStateCovariance, 2, i), + cvGetReal2D(sqrtStateCovariance, 3, i), + cvGetReal2D(sqrtStateCovariance, 4, i)); + cvGEMM(sqrtStateCovariance, sqrtStateCovariance, 1., NULL, 0, stateTmp); + printf("sqrtStateCovariance^2:\n"); + for (int i = 0; i < 5; i++) + printf("%+.10f %+.10f %+.10f %+.10f %+.10f\n", cvGetReal2D(stateTmp, 0, i), + cvGetReal2D(stateTmp, 1, i), cvGetReal2D(stateTmp, 2, i), + cvGetReal2D(stateTmp, 3, i), cvGetReal2D(stateTmp, 4, i)); #endif // 2. Form new sigma points. int sigma_i = 0; cvCopy(state, sigma_state[sigma_i++]); - for (int i = 0; i < state_n; i++) { + for (int i = 0; i < state_n; i++) + { CvMat col; cvGetCol(sqrtStateCovariance, &col, i); cvAdd(state, &col, sigma_state[sigma_i++]); @@ -155,59 +176,63 @@ void UnscentedKalman::initialize() { sigmasUpdated = true; } -void UnscentedKalman::predict(UnscentedProcess *process_model) { - if (!sigmasUpdated) initialize(); +void UnscentedKalman::predict(UnscentedProcess* process_model) +{ + if (!sigmasUpdated) + initialize(); // Map sigma points through the process model and compute new state mean. cvSetZero(state); double L = state_n + state_k; double totalWeight = 0; - for (int i = 0; i < sigma_n; i++) { - double weight = i == 0 - ? lambda / (L + lambda) - : .5 / (L + lambda); - totalWeight += weight; + for (int i = 0; i < sigma_n; i++) + { + double weight = i == 0 ? lambda / (L + lambda) : .5 / (L + lambda); + totalWeight += weight; } - for (int i = 0; i < sigma_n; i++) { - CvMat *sigma = sigma_state[i]; + for (int i = 0; i < sigma_n; i++) + { + CvMat* sigma = sigma_state[i]; process_model->f(sigma); - double weight = i == 0 - ? 
lambda / (L + lambda) - : .5 / (L + lambda); - double scale = weight / totalWeight; + double weight = i == 0 ? lambda / (L + lambda) : .5 / (L + lambda); + double scale = weight / totalWeight; cvAddWeighted(sigma, scale, state, 1., 0., state); } // Compute new state co-variance. cvSetZero(stateCovariance); totalWeight = 0; - for (int i = 0; i < sigma_n; i++) { - double weight = i == 0 - ? lambda / (L + lambda) + lambda2 - : .5 / (L + lambda); - totalWeight += weight; + for (int i = 0; i < sigma_n; i++) + { + double weight = + i == 0 ? lambda / (L + lambda) + lambda2 : .5 / (L + lambda); + totalWeight += weight; } - for (int i = 0; i < sigma_n; i++) { - double weight = i == 0 - ? lambda / (L + lambda) + lambda2 - : .5 / (L + lambda); - double scale = weight / totalWeight; + for (int i = 0; i < sigma_n; i++) + { + double weight = + i == 0 ? lambda / (L + lambda) + lambda2 : .5 / (L + lambda); + double scale = weight / totalWeight; cvSub(sigma_state[i], state, stateDiff); cvGEMM(stateDiff, stateDiff, scale, stateCovariance, 1., stateCovariance, - CV_GEMM_B_T); + CV_GEMM_B_T); } // Add any additive noise. - CvMat *noise = process_model->getProcessNoise(); - if (noise) cvAdd(stateCovariance, noise, stateCovariance); + CvMat* noise = process_model->getProcessNoise(); + if (noise) + cvAdd(stateCovariance, noise, stateCovariance); #ifdef MYDEBUG printf("predicted state: "); - for (int i = 0; i < state_n; i++) printf("%f ", cvGetReal1D(state, i)); + for (int i = 0; i < state_n; i++) + printf("%f ", cvGetReal1D(state, i)); printf("\n"); printf("predicted stateCovariance:\n"); - for (int i = 0; i < state_n; i++) { - for (int j = 0; j < state_n; j++) printf("%+f ", cvGetReal2D(stateCovariance, i, j)); + for (int i = 0; i < state_n; i++) + { + for (int j = 0; j < state_n; j++) + printf("%+f ", cvGetReal2D(stateCovariance, i, j)); printf("\n"); } #endif @@ -215,11 +240,14 @@ void UnscentedKalman::predict(UnscentedProcess *process_model) { sigmasUpdated = false; } -void UnscentedKalman::update(UnscentedObservation *obs) { - if (!sigmasUpdated) initialize(); - CvMat *innovation = obs->getObservation(); +void UnscentedKalman::update(UnscentedObservation* obs) +{ + if (!sigmasUpdated) + initialize(); + CvMat* innovation = obs->getObservation(); int obs_n = innovation->rows; - if (obs_n > this->obs_n) { + if (obs_n > this->obs_n) + { printf("Observation exceeds maximum size!\n"); abort(); } @@ -227,55 +255,62 @@ void UnscentedKalman::update(UnscentedObservation *obs) { // Map sigma points through the observation model and compute predicted mean. CvMat predObs = cvMat(obs_n, 1, CV_64F, this->predObs->data.db); cvSetZero(&predObs); - for (int i = 0; i < sigma_n; i++) { + for (int i = 0; i < sigma_n; i++) + { CvMat sigma_h = cvMat(obs_n, 1, CV_64F, sigma_predObs[i]->data.db); - double scale = i == 0 - ? (double)state_k / (double)(state_n + state_k) - : .5 / (double)(state_n + state_k); + double scale = i == 0 ? (double)state_k / (double)(state_n + state_k) : + .5 / (double)(state_n + state_k); obs->h(&sigma_h, sigma_state[i]); cvAddWeighted(&sigma_h, scale, &predObs, 1., 0., &predObs); } // Compute predicted observation co-variance. 
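For reference, the update assembled here is the standard unscented Kalman measurement update. In the notation of the comments further down (gain W, innovation v, predicted observation covariance Z, additive observation noise R, state/observation cross-correlation C):

```
W = C\,(R + Z)^{-1}, \qquad
x \leftarrow x + W\,v, \qquad
P \leftarrow P - W\,(R + Z)\,W^{\mathsf{T}}
```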
- CvMat predObsCovariance = cvMat(obs_n, obs_n, CV_64F, - this->predObsCovariance->data.db); - CvMat statePredObsCrossCorrelation = cvMat(state_n, obs_n, CV_64F, - this->statePredObsCrossCorrelation->data.db); + CvMat predObsCovariance = + cvMat(obs_n, obs_n, CV_64F, this->predObsCovariance->data.db); + CvMat statePredObsCrossCorrelation = cvMat( + state_n, obs_n, CV_64F, this->statePredObsCrossCorrelation->data.db); CvMat predObsDiff = cvMat(obs_n, 1, CV_64F, this->predObsDiff->data.db); cvSetZero(&predObsCovariance); cvSetZero(&statePredObsCrossCorrelation); - for (int i = 0; i < sigma_n; i++) { + for (int i = 0; i < sigma_n; i++) + { CvMat sigma_h = cvMat(obs_n, 1, CV_64F, sigma_predObs[i]->data.db); - double scale = i == 0 - ? (double)state_k / (double)(state_n + state_k) - : .5 / (double)(state_n + state_k); + double scale = i == 0 ? (double)state_k / (double)(state_n + state_k) : + .5 / (double)(state_n + state_k); cvSub(sigma_state[i], state, stateDiff); cvSub(&sigma_h, &predObs, &predObsDiff); - cvGEMM(&predObsDiff, &predObsDiff, scale, &predObsCovariance, 1., &predObsCovariance, - CV_GEMM_B_T); - cvGEMM(stateDiff, &predObsDiff, scale, &statePredObsCrossCorrelation, 1., - &statePredObsCrossCorrelation, CV_GEMM_B_T); + cvGEMM(&predObsDiff, &predObsDiff, scale, &predObsCovariance, 1., + &predObsCovariance, CV_GEMM_B_T); + cvGEMM(stateDiff, &predObsDiff, scale, &statePredObsCrossCorrelation, 1., + &statePredObsCrossCorrelation, CV_GEMM_B_T); } // Add any additive noise. - CvMat *noise = obs->getObservationNoise(); - if (noise) cvAdd(&predObsCovariance, noise, &predObsCovariance); + CvMat* noise = obs->getObservationNoise(); + if (noise) + cvAdd(&predObsCovariance, noise, &predObsCovariance); #ifdef MYDEBUG printf("real observation: "); - for (int i = 0; i < obs_n; i++) printf("%+f ", cvGetReal1D(innovation ,i)); + for (int i = 0; i < obs_n; i++) + printf("%+f ", cvGetReal1D(innovation, i)); printf("\n"); printf("predicted observation: "); - for (int i = 0; i < obs_n; i++) printf("%+f ", cvGetReal1D(&predObs,i)); + for (int i = 0; i < obs_n; i++) + printf("%+f ", cvGetReal1D(&predObs, i)); printf("\n"); printf("predicted observation co-variance\n"); - for (int i = 0; i < obs_n; i++) { - for (int j = 0; j < obs_n; j++) printf("%+f ", cvGetReal2D(&predObsCovariance,i,j)); + for (int i = 0; i < obs_n; i++) + { + for (int j = 0; j < obs_n; j++) + printf("%+f ", cvGetReal2D(&predObsCovariance, i, j)); printf("\n"); } printf("state observation cross-correlation\n"); - for (int i = 0; i < state_n; i++) { - for (int j = 0; j < obs_n; j++) printf("%+f ", cvGetReal2D(&statePredObsCrossCorrelation,i,j)); + for (int i = 0; i < state_n; i++) + { + for (int j = 0; j < obs_n; j++) + printf("%+f ", cvGetReal2D(&statePredObsCrossCorrelation, i, j)); printf("\n"); } #endif @@ -286,20 +321,21 @@ void UnscentedKalman::update(UnscentedObservation *obs) { // state: x = x + _W * v // co-var: P = P - W * (R + Z) * W^T - CvMat invPredObsCovariance = cvMat(obs_n, obs_n, CV_64F, - this->invPredObsCovariance->data.db); + CvMat invPredObsCovariance = + cvMat(obs_n, obs_n, CV_64F, this->invPredObsCovariance->data.db); CvMat kalmanGain = cvMat(state_n, obs_n, CV_64F, this->kalmanGain->data.db); CvMat kalmanTmp = cvMat(state_n, obs_n, CV_64F, this->kalmanTmp->data.db); cvSub(innovation, &predObs, innovation); - //double inno_norm = cvNorm(innovation) / obs_n; - //if (inno_norm > 5.0) { + // double inno_norm = cvNorm(innovation) / obs_n; + // if (inno_norm > 5.0) { // return; //} #ifdef MYDEBUG printf("innovation: "); 
- for (int i = 0; i < obs_n; i++) printf("%f ", cvGetReal1D(innovation,i)); + for (int i = 0; i < obs_n; i++) + printf("%f ", cvGetReal1D(innovation, i)); printf("\n"); double inn_norm = cvNorm(innovation); printf("innivation norm: %f\n", inn_norm); @@ -310,14 +346,15 @@ void UnscentedKalman::update(UnscentedObservation *obs) { cvGEMM(&kalmanGain, innovation, 1., state, 1., state); cvMatMul(&kalmanGain, &predObsCovariance, &kalmanTmp); cvGEMM(&kalmanTmp, &kalmanGain, -1., stateCovariance, 1., stateCovariance, - CV_GEMM_B_T); + CV_GEMM_B_T); #ifdef MYDEBUG printf("estimated state: "); - for (int i = 0; i < state_n; i++) printf("%f ", cvGetReal1D(state, i)); + for (int i = 0; i < state_n; i++) + printf("%f ", cvGetReal1D(state, i)); printf("\n"); #endif sigmasUpdated = false; } -} // namespace alvar +} // namespace alvar diff --git a/ar_track_alvar/src/Util.cpp b/ar_track_alvar/src/Util.cpp index 5a7e6f7..8398a88 100644 --- a/ar_track_alvar/src/Util.cpp +++ b/ar_track_alvar/src/Util.cpp @@ -23,489 +23,625 @@ #include "ar_track_alvar/Util.h" #include "ar_track_alvar/FileFormatUtils.h" +#include using namespace std; -namespace alvar { +namespace alvar +{ using namespace std; -//ttesis +// ttesis + +int dot(const cv::Point& A, const cv::Point& B, const cv::Point& C) +{ + cv::Point AB, BC; + AB.x = B.x - A.x; + AB.y = B.y - A.y; + BC.x = C.x - B.x; + BC.y = C.y - B.y; + int dot = AB.x * BC.x + AB.y * BC.y; + return dot; +} + +int cross(const cv::Point& A, const cv::Point& B, const cv::Point& C) +{ + cv::Point AB, AC; + AB.x = B.x - A.x; + AB.y = B.y - A.y; + AC.x = C.x - A.x; + AC.y = C.y - A.y; + int cross = AB.x * AC.y - AB.y * AC.x; + return cross; +} + +double distance(const cv::Point& A, const cv::Point& B) +{ + double d1 = A.x - B.x; + double d2 = A.y - B.y; + return sqrt(d1 * d1 + d2 * d2); +} + +double linePointDist(const cv::Point& A, const cv::Point& B, const cv::Point& C, + bool isSegment) +{ + double dist = cross(A, B, C) / distance(A, B); + if (isSegment) + { + int dot1 = dot(A, B, C); + if (dot1 > 0) + return distance(B, C); + int dot2 = dot(B, A, C); + if (dot2 > 0) + return distance(A, C); + } + return abs(dist); +} + +double angle(const cv::Point& A, const cv::Point& B, const cv::Point& C, + const cv::Point& D, int isDirectionDependent) +{ + double angle; + double a = B.x - A.x; + double b = B.y - A.y; + double c = D.x - C.x; + double d = D.y - C.y; + angle = + acos(((a * c) + (b * d)) / (sqrt(a * a + b * b) * sqrt(c * c + d * d))); + if (isDirectionDependent) + { + return angle; + } + else + { + if (angle > CV_PI / 2) + { + return CV_PI - angle; + } + else + return angle; + } +} + +double polyLinePointDist(const std::vector& points, + const cv::Point& C, int* index, int isClosedPolygon) +{ + // Calculate minimum distance of Point C to Polygon whose points are in list + // PointList if isClosedPolygon is true polygon is closed (segnment of the + // first and last point is also checked) index is the index of point A in + // pointlist, where A is the starting point of the closest polygon segment + *index = -1; + double mindist = -1; + double dist; + for (int i = 0; i < points.size() - 1; i++) + { + dist = linePointDist(points[i], points[i + 1], C, 1); + if (mindist == -1 || dist < mindist) + { + mindist = dist; + *index = i; + } + } + if (isClosedPolygon) + { + dist = linePointDist(points[points.size() - 1], points[0], C, 1); + if (dist < mindist) + { + mindist = dist; + *index = points.size() - 1; + } + } + return mindist; +} + +// ttesis + +void FitCVEllipse(const vector& 
points, + cv::RotatedRect& ellipse_box) +{ + if (points.size() < 8) + return; + + cv::Mat vector = cv::Mat(1, int(points.size()), CV_64FC2); + for (size_t i = 0; i < points.size(); ++i) + { + vector.at(0, i) = cv::Vec2d(points[i].x, points[i].y); + } + ellipse_box = cv::fitEllipse(vector); + vector.release(); +} + +int exp_filt2(vector& v, vector& ret, bool clamp) +{ + double y; + int n = (int)v.size(); + + double a = pow(0.01, 8.0 / n); // 0.8; + double k = -log(a); -int dot(CvPoint *A, CvPoint *B, CvPoint *C){ - CvPoint AB, BC; - AB.x = B->x-A->x; - AB.y = B->y-A->y; - BC.x = C->x-B->x; - BC.y = C->y-B->y; - int dot = AB.x * BC.x + AB.y * BC.y; - return dot; + // Forward + vector yp(n); + + y = 0; + for (int i = 0; i < n; ++i) + y = a * y + v[i]; + + y *= 1.0 / (1.0 - pow(a, n)); + + for (int i = 0; i < n; ++i) + { + y = a * y + v[i]; + yp[i] = y; + } + + // Backward + vector ym(n); + + y = 0; + for (int i = n - 1; i >= 0; --i) + y = a * y + v[i]; + + y *= 1.0 / (1.0 - pow(a, n)); + + for (int i = n - 1; i >= 0; --i) + { + y = a * y + v[i]; + ym[i] = y; + } + + // Filter + ret.resize(n); + for (int i = 0; i < n; ++i) + { + ret[i] = (k / 2.0) * (yp[i] + ym[i] - v[i]); + } + + return int(ret.size()); +} + +int find_zero_crossings(const vector& v, vector& corners, int offs) +{ + int ind = 0; + int len = (int)v.size(); + + int state; + if (Sign(v.at(0)) == 1) + state = 1; + else + state = 2; + + corners.clear(); + for (int i = 0; i < len + offs; ++i) + { + if (i < len) + ind = i; + else + ind = i - len; + + int s = Sign(v.at(ind)); + if (state == 1 && s == -1) + state = 2; + if (state == 2 && s == 1) + { + state = 1; + bool test = true; + for (unsigned j = 0; j < corners.size(); ++j) + if (corners.at(j) == ind) + test = false; + + if (test) + corners.push_back(ind); + } + } + + return int(corners.size()); } -int cross(CvPoint *A,CvPoint *B, CvPoint *C){ - CvPoint AB, AC; - AB.x = B->x-A->x; - AB.y = B->y-A->y; - AC.x = C->x-A->x; - AC.y = C->y-A->y; - int cross = AB.x * AC.y - AB.y * AC.x; - return cross; +void out_matrix(const cv::Mat& m, const char* name) +{ + if (m.cols == 1) + { + std::cout << name << " = ["; + for (int j = 0; j < m.rows; j++) + { + std::cout << " " << m.at(j, 0); + } + std::cout << "]^T" << std::endl; + } + else if (m.rows == 1) + { + std::cout << name << " = ["; + for (int i = 0; i < m.cols; i++) + { + std::cout << " " << m.at(0, i); + } + std::cout << "]^T" << std::endl; + } + else + { + std::cout << name << " = [" << std::endl; + for (int j = 0; j < m.rows; j++) + { + for (int i = 0; i < m.cols; i++) + { + std::cout << " " << m.at(j, i); + } + std::cout << std::endl; + } + std::cout << "]" << std::endl; + } +} + +double Limit(double val, double min_val, double max_val) +{ + return max(min_val, min(max_val, val)); } -double distance(CvPoint *A,CvPoint *B){ - double d1 = A->x - B->x; - double d2 = A->y - B->y; - return sqrt(d1*d1+d2*d2); +Index::Index(int a) +{ + val.push_back(a); } +Index::Index(int a, int b) +{ + val.push_back(a); + val.push_back(b); +} -double linePointDist(CvPoint *A,CvPoint *B,CvPoint *C, bool isSegment){ - double dist = cross(A,B,C) / distance(A,B); - if(isSegment){ - int dot1 = dot(A,B,C); - if(dot1 > 0)return distance(B,C); - int dot2 = dot(B,A,C); - if(dot2 > 0)return distance(A,C); +Index::Index(int a, int b, int c) +{ + val.push_back(a); + val.push_back(b); + val.push_back(c); +} + +bool Index::operator<(const Index& index) const +{ + int comp = 0; + size_t d = 0; + // Go through the dimensions (last being the most significant) + while ((d < 
val.size()) || (d < index.val.size())) + { + int v0 = (d < val.size() ? val[d] : 0); + int v1 = (d < index.val.size() ? index.val[d] : 0); + if (v0 < v1) + comp = -1; + else if (v1 < v0) + comp = 1; + d++; + } + if (comp == -1) + return true; + return false; +} + +int Histogram::DimIndex(int dim, double val) +{ + int binsize = (dim < int(dim_binsize.size()) ? dim_binsize[dim] : 1); + if (val >= 0) + return int(val + (binsize / 2)) / binsize; + return int(val - (binsize / 2)) / binsize; +} +double Histogram::DimVal(int dim, int index) +{ + int binsize = (dim < int(dim_binsize.size()) ? dim_binsize[dim] : 1); + return (index * binsize); +} +void Histogram::AddDimension(int binsize) +{ + dim_binsize.push_back(binsize); +} +void Histogram::Clear() +{ + bins.clear(); +} +void Histogram::Inc(double dim0, double dim1, double dim2) +{ + Index index(DimIndex(0, dim0), DimIndex(1, dim1), DimIndex(2, dim2)); + if (bins.find(index) != bins.end()) + { + bins[index]++; + } + else + { + bins[index] = 1; + } +} +int Histogram::GetMax(double* dim0, double* dim1, double* dim2) +{ + map::const_iterator iter, max_iter; + int max = 0; + for (max_iter = iter = bins.begin(); iter != bins.end(); iter++) + { + if (iter->second > max) + { + max = iter->second; + max_iter = iter; + } + } + if (max > 0) + { + *dim0 = DimVal(0, max_iter->first.val[0]); + if (dim1) + *dim1 = DimVal(1, max_iter->first.val[1]); + if (dim2) + *dim2 = DimVal(2, max_iter->first.val[2]); + } + return max; +} +void HistogramSubpixel::Clear() +{ + bins.clear(); + acc_dim0.clear(); + acc_dim1.clear(); + acc_dim2.clear(); +} +void HistogramSubpixel::Inc(double dim0, double dim1, double dim2) +{ + Index index(DimIndex(0, dim0), DimIndex(1, dim1), DimIndex(2, dim2)); + if (bins.find(index) != bins.end()) + { + bins[index]++; + acc_dim0[index] += dim0; + acc_dim1[index] += dim1; + acc_dim2[index] += dim2; + } + else + { + bins[index] = 1; + acc_dim0[index] = dim0; + acc_dim1[index] = dim1; + acc_dim2[index] = dim2; + } +} +int HistogramSubpixel::GetMax(double* dim0, double* dim1, double* dim2) +{ + map::const_iterator iter; + int max = 0; + int divider = 0; + for (iter = bins.begin(); iter != bins.end(); iter++) + { + if (iter->second > max) + { + divider = max = iter->second; + *dim0 = acc_dim0[iter->first]; + if (dim1) + *dim1 = acc_dim1[iter->first]; + if (dim2) + *dim2 = acc_dim2[iter->first]; + } + else if (iter->second == max) + { + divider += iter->second; + *dim0 += acc_dim0[iter->first]; + if (dim1) + *dim1 += acc_dim1[iter->first]; + if (dim2) + *dim2 += acc_dim2[iter->first]; } - return abs(dist); -} - -double angle(CvPoint *A,CvPoint *B, CvPoint *C,CvPoint *D, int isDirectionDependent){ - double angle; - double a = B->x - A->x; - double b = B->y - A->y; - double c = D->x - C->x; - double d = D->y - C->y; - angle = acos( ((a * c) + (b * d) )/(sqrt(a*a + b*b) * sqrt(c*c + d*d))); - if(isDirectionDependent){ - return angle; - }else{ - if (angle > CV_PI/2){ - return CV_PI - angle; - }else - return angle; - } -} - -double polyLinePointDist(CvPoint *PointList, int nPnts,CvPoint *C, int *index, int isClosedPolygon){ -// Calculate minimum distance of Point C to Polygon whose points are in list PointList -// if isClosedPolygon is true polygon is closed (segnment of the first and last point is also checked) -// index is the index of point A in pointlist, -// where A is the starting point of the closest polygon segment - *index = -1; - double mindist= -1; - double dist; - for( int i=0; i < nPnts-1; i++){ - 
dist=linePointDist(&PointList[i],&PointList[i+1],C,1); - if (mindist == -1 || dist &points, CvBox2D& ellipse_box) -{ - if(points.size() < 8) return; - - CvMat* vector = cvCreateMat(1, int(points.size()), CV_64FC2); - for(size_t i = 0; i < points.size(); ++i) - { - CV_MAT_ELEM(*vector, CvPoint2D64f, 0, i) = (CvPoint2D64f)points[i]; - } - ellipse_box = cvFitEllipse2(vector); - cvReleaseMat(&vector); -} - -int exp_filt2(vector &v, vector &ret, bool clamp) -{ - double y; - int n = (int)v.size(); - - double a = pow(0.01, 8.0/n);//0.8; - double k = -log(a); - - // Forward - vector yp(n); - - y = 0; - for(int i = 0; i < n; ++i) - y = a * y + v[i]; - - y *= 1.0 / (1.0-pow(a,n)); - - for(int i = 0; i < n; ++i) - { - y = a * y + v[i]; - yp[i] = y; - } - - // Backward - vector ym(n); - - y = 0; - for(int i = n-1; i >= 0; --i) - y = a * y + v[i]; - - y *= 1.0 / (1.0-pow(a,n)); - - for(int i = n-1; i >= 0; --i) - { - y = a * y + v[i]; - ym[i] = y; - } - - // Filter - ret.resize(n); - for(int i = 0; i < n; ++i) - { - ret[i] = (k/2.0) * (yp[i] + ym[i] - v[i]); - } - - return int(ret.size()); -} - -int find_zero_crossings(const vector& v, vector &corners, int offs) -{ - int ind = 0; - int len = (int)v.size(); - - int state; - if(Sign(v.at(0)) == 1) - state = 1; - else - state = 2; - - corners.clear(); - for(int i = 0; i < len+offs; ++i) - { - if(icols == 1) { - std::cout<rows; j++) { - std::cout<<" "<rows == 1) { - std::cout<cols; i++) { - std::cout<<" "<rows; j++) { - for (int i=0; icols; i++) { - std::cout<<" "<= 0) return int(val+(binsize/2))/binsize; - return int(val-(binsize/2))/binsize; -} -double Histogram::DimVal(int dim, int index) { - int binsize = (dim < int(dim_binsize.size()) ? dim_binsize[dim] : 1); - return (index * binsize); -} -void Histogram::AddDimension(int binsize) { - dim_binsize.push_back(binsize); -} -void Histogram::Clear() { - bins.clear(); -} -void Histogram::Inc(double dim0, double dim1, double dim2) { - Index index(DimIndex(0,dim0), DimIndex(1,dim1), DimIndex(2,dim2)); - if (bins.find(index) != bins.end()) { - bins[index]++; - } else { - bins[index] = 1; - } -} -int Histogram::GetMax(double *dim0, double *dim1, double *dim2) { - map::const_iterator iter, max_iter; - int max=0; - for (max_iter = iter = bins.begin(); iter != bins.end(); iter++) { - if (iter->second > max) { - max = iter->second; - max_iter = iter; - } - } - if (max > 0) { - *dim0 = DimVal(0, max_iter->first.val[0]); - if (dim1) *dim1 = DimVal(1, max_iter->first.val[1]); - if (dim2) *dim2 = DimVal(2, max_iter->first.val[2]); - } - return max; -} -void HistogramSubpixel::Clear() { - bins.clear(); - acc_dim0.clear(); - acc_dim1.clear(); - acc_dim2.clear(); -} -void HistogramSubpixel::Inc(double dim0, double dim1, double dim2) { - Index index(DimIndex(0,dim0), DimIndex(1,dim1), DimIndex(2,dim2)); - if (bins.find(index) != bins.end()) { - bins[index]++; - acc_dim0[index] += dim0; - acc_dim1[index] += dim1; - acc_dim2[index] += dim2; - } else { - bins[index] = 1; - acc_dim0[index] = dim0; - acc_dim1[index] = dim1; - acc_dim2[index] = dim2; - } -} -int HistogramSubpixel::GetMax(double *dim0, double *dim1, double *dim2) { - map::const_iterator iter; - int max=0; - int divider=0; - for (iter = bins.begin(); iter != bins.end(); iter++) { - if (iter->second > max) { - divider = max = iter->second; - *dim0 = acc_dim0[iter->first]; - if (dim1) *dim1 = acc_dim1[iter->first]; - if (dim2) *dim2 = acc_dim2[iter->first]; - } else if (iter->second == max) { - divider += iter->second; - *dim0 += acc_dim0[iter->first]; - if (dim1) 
*dim1 += acc_dim1[iter->first]; - if (dim2) *dim2 += acc_dim2[iter->first]; - } - } - if (max > 0) { - *dim0 /= divider; - if (dim1) *dim1 /= divider; - if (dim2) *dim2 /= divider; - } - return max; -} - -struct SerializationFormatterXml { - TiXmlDocument document; - TiXmlElement *xml_current; - SerializationFormatterXml() : xml_current(0) {} + } + if (max > 0) + { + *dim0 /= divider; + if (dim1) + *dim1 /= divider; + if (dim2) + *dim2 /= divider; + } + return max; +} + +struct SerializationFormatterXml +{ + TiXmlDocument document; + TiXmlElement* xml_current; + SerializationFormatterXml() : xml_current(0) + { + } }; -bool Serialization::Output() { - SerializationFormatterXml *xml = (SerializationFormatterXml *)formatter_handle; - if (filename.size() > 0) { - //xml->document.LinkEndChild(new TiXmlDeclaration("1.0", "UTF-8", "no")); - xml->document.InsertBeforeChild (xml->document.RootElement(), TiXmlDeclaration("1.0", "UTF-8", "no")); - xml->document.SaveFile(filename.c_str()); - } else { - const TiXmlNode *node = (xml->xml_current ? xml->xml_current : xml->document.RootElement()); - std::basic_ostream *os = dynamic_cast *>(stream); - (*os)<<(*node); - //(*stream)<<(*node); - } - return true; -} - -bool Serialization::Input() { - SerializationFormatterXml *xml = (SerializationFormatterXml *)formatter_handle; - if (filename.size() > 0) { - xml->document.LoadFile(filename.c_str()); - } else { - // TODO: How this should be handled with nested classes? - TiXmlNode *node = (xml->xml_current ? xml->xml_current : xml->document.RootElement()); - if (node == 0) { - node = (TiXmlElement*)xml->document.LinkEndChild(new TiXmlElement("root")); - } - std::basic_istream *is = dynamic_cast *>(stream); - (*is)>>(*node); - //(*stream)>>(*node); - } - return true; -} - -bool Serialization::Descend(const char *id) { - SerializationFormatterXml *xml = (SerializationFormatterXml *)formatter_handle; - if (input) { - if (xml->xml_current == 0) { - xml->xml_current = xml->document.RootElement(); - if (!xml->xml_current || (strcmp(xml->xml_current->Value(), id) != 0)) { - return false; - } - } else { - xml->xml_current = (TiXmlElement*)xml->xml_current->FirstChild (id); - if (xml->xml_current == NULL) return false; - } - } else { - if (xml->xml_current == 0) { - xml->xml_current = (TiXmlElement*)xml->document.LinkEndChild(new TiXmlElement(id)); - } else { - xml->xml_current = (TiXmlElement*)xml->xml_current->LinkEndChild(new TiXmlElement(id)); - } - } - return true; -} -bool Serialization::Ascend() { - SerializationFormatterXml *xml = (SerializationFormatterXml *)formatter_handle; - xml->xml_current = (TiXmlElement*)xml->xml_current->Parent(); - return true; -} - -Serialization::Serialization(std::string _filename) { - SerializationFormatterXml *xml = new SerializationFormatterXml(); - formatter_handle = xml; - filename = _filename; - input = false; // by default output -} - -Serialization::Serialization(std::basic_iostream &_stream) -{ - SerializationFormatterXml *xml = new SerializationFormatterXml(); - formatter_handle = xml; - stream = &_stream; -} - -Serialization::Serialization(std::basic_istream &_stream) { - SerializationFormatterXml *xml = new SerializationFormatterXml(); - formatter_handle = xml; - stream = &_stream; -} - -Serialization::Serialization(std::basic_ostream &_stream) { - SerializationFormatterXml *xml = new SerializationFormatterXml(); - formatter_handle = xml; - stream = &_stream; -} - -Serialization::~Serialization() { - SerializationFormatterXml *xml = (SerializationFormatterXml 
*)formatter_handle; - delete xml; -} - -bool Serialization::Serialize(int &data, const std::string &name) { - SerializationFormatterXml *xml = (SerializationFormatterXml *)formatter_handle; - if (!xml || !xml->xml_current) return false; - bool ret = true; - if (input) ret = (xml->xml_current->QueryIntAttribute(name, &data) == TIXML_SUCCESS); - else xml->xml_current->SetAttribute(name, data); - return ret; -} - -bool Serialization::Serialize(unsigned short &data, const std::string &name) { - int i = data; - bool ret = Serialize(i, name); - data = i; - return ret; -} - -bool Serialization::Serialize(unsigned long &data, const std::string &name) { - // TODO: Possible overflow here... - int i = data; - bool ret = Serialize(i, name); - data = i; - return ret; -} - -bool Serialization::Serialize(double &data, const std::string &name) { - SerializationFormatterXml *xml = (SerializationFormatterXml *)formatter_handle; - bool ret = true; - if (input) ret = (xml->xml_current->QueryDoubleAttribute(name, &data) == TIXML_SUCCESS); - else xml->xml_current->SetDoubleAttribute(name.c_str(), data); - return ret; -} - -bool Serialization::Serialize(std::string &data, const std::string &name) { - SerializationFormatterXml *xml = (SerializationFormatterXml *)formatter_handle; - bool ret = true; - if (input) { - const char *tmp = xml->xml_current->Attribute(name.c_str()); - if (tmp == NULL) ret=false; - else data = tmp; - } - else xml->xml_current->SetAttribute(name.c_str(), data.c_str()); - return ret; -} - -bool Serialization::Serialize(CvMat &data, const std::string &name) { - SerializationFormatterXml *xml = (SerializationFormatterXml *)formatter_handle; - bool ret = true; - if (input) { - TiXmlElement *xml_matrix = (TiXmlElement*)xml->xml_current->FirstChild(name); - if (xml_matrix == NULL) return false; - if (!FileFormatUtils::parseXMLMatrix(xml_matrix, &data)) return false; - } - else { - xml->xml_current->LinkEndChild(FileFormatUtils::createXMLMatrix(name.c_str(), &data)); - } - return ret; +bool Serialization::Output() +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + if (filename.size() > 0) + { + // xml->document.LinkEndChild(new TiXmlDeclaration("1.0", "UTF-8", "no")); + xml->document.InsertBeforeChild(xml->document.RootElement(), + TiXmlDeclaration("1.0", "UTF-8", "no")); + xml->document.SaveFile(filename.c_str()); + } + else + { + const TiXmlNode* node = + (xml->xml_current ? xml->xml_current : xml->document.RootElement()); + std::basic_ostream<char>* os = + dynamic_cast<std::basic_ostream<char>*>(stream); + (*os) << (*node); + //(*stream)<<(*node); + } + return true; +} + +bool Serialization::Input() +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + if (filename.size() > 0) + { + xml->document.LoadFile(filename.c_str()); + } + else + { + // TODO: How this should be handled with nested classes? + TiXmlNode* node = + (xml->xml_current ? 
xml->xml_current : xml->document.RootElement()); + if (node == 0) + { + node = (TiXmlElement*)xml->document.LinkEndChild(new TiXmlElement("roo" + "t")); + } + std::basic_istream<char>* is = + dynamic_cast<std::basic_istream<char>*>(stream); + (*is) >> (*node); + //(*stream)>>(*node); + } + return true; +} + +bool Serialization::Descend(const char* id) +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + if (input) + { + if (xml->xml_current == 0) + { + xml->xml_current = xml->document.RootElement(); + if (!xml->xml_current || (strcmp(xml->xml_current->Value(), id) != 0)) + { + return false; + } + } + else + { + xml->xml_current = (TiXmlElement*)xml->xml_current->FirstChild(id); + if (xml->xml_current == NULL) + return false; + } + } + else + { + if (xml->xml_current == 0) + { + xml->xml_current = + (TiXmlElement*)xml->document.LinkEndChild(new TiXmlElement(id)); + } + else + { + xml->xml_current = + (TiXmlElement*)xml->xml_current->LinkEndChild(new TiXmlElement(id)); + } + } + return true; +} +bool Serialization::Ascend() +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + xml->xml_current = (TiXmlElement*)xml->xml_current->Parent(); + return true; +} + +Serialization::Serialization(std::string _filename) +{ + SerializationFormatterXml* xml = new SerializationFormatterXml(); + formatter_handle = xml; + filename = _filename; + input = false; // by default output +} + +Serialization::Serialization(std::basic_iostream<char>& _stream) +{ + SerializationFormatterXml* xml = new SerializationFormatterXml(); + formatter_handle = xml; + stream = &_stream; +} + +Serialization::Serialization(std::basic_istream<char>& _stream) +{ + SerializationFormatterXml* xml = new SerializationFormatterXml(); + formatter_handle = xml; + stream = &_stream; +} + +Serialization::Serialization(std::basic_ostream<char>& _stream) +{ + SerializationFormatterXml* xml = new SerializationFormatterXml(); + formatter_handle = xml; + stream = &_stream; +} + +Serialization::~Serialization() +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + delete xml; +} + +bool Serialization::Serialize(int& data, const std::string& name) +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + if (!xml || !xml->xml_current) + return false; + bool ret = true; + if (input) + ret = (xml->xml_current->QueryIntAttribute(name, &data) == TIXML_SUCCESS); + else + xml->xml_current->SetAttribute(name, data); + return ret; +} + +bool Serialization::Serialize(unsigned short& data, const std::string& name) +{ + int i = data; + bool ret = Serialize(i, name); + data = i; + return ret; +} + +bool Serialization::Serialize(unsigned long& data, const std::string& name) +{ + // TODO: Possible overflow here... 
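One way the TODO above could be addressed, sketched here under the assumption that the int-based Serialize(int&, name) overload remains the underlying storage path (SerializeULongChecked is a hypothetical helper, not part of the library):

```
#include <climits>
#include <string>

// Reject values that cannot round-trip through the int-based attribute
// storage instead of silently truncating them.
bool SerializeULongChecked(alvar::Serialization& s, unsigned long& data,
                           const std::string& name)
{
  if (data > static_cast<unsigned long>(INT_MAX))
    return false;  // would overflow the int used internally
  int i = static_cast<int>(data);
  bool ret = s.Serialize(i, name);
  data = static_cast<unsigned long>(i);
  return ret;
}
```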
+ int i = data; + bool ret = Serialize(i, name); + data = i; + return ret; } -} // namespace alvar +bool Serialization::Serialize(double& data, const std::string& name) +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + bool ret = true; + if (input) + ret = + (xml->xml_current->QueryDoubleAttribute(name, &data) == TIXML_SUCCESS); + else + xml->xml_current->SetDoubleAttribute(name.c_str(), data); + return ret; +} + +bool Serialization::Serialize(std::string& data, const std::string& name) +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + bool ret = true; + if (input) + { + const char* tmp = xml->xml_current->Attribute(name.c_str()); + if (tmp == NULL) + ret = false; + else + data = tmp; + } + else + xml->xml_current->SetAttribute(name.c_str(), data.c_str()); + return ret; +} + +bool Serialization::Serialize(cv::Mat& data, const std::string& name) +{ + SerializationFormatterXml* xml = (SerializationFormatterXml*)formatter_handle; + bool ret = true; + if (input) + { + TiXmlElement* xml_matrix = + (TiXmlElement*)xml->xml_current->FirstChild(name); + if (xml_matrix == NULL) + return false; + if (!FileFormatUtils::parseXMLMatrix(xml_matrix, data)) + return false; + } + else + { + xml->xml_current->LinkEndChild( + FileFormatUtils::createXMLMatrix(name.c_str(), data)); + } + return ret; +} + +} // namespace alvar diff --git a/ar_track_alvar/src/kinect_filtering.cpp b/ar_track_alvar/src/kinect_filtering.cpp index ec4773c..1bc1fd3 100644 --- a/ar_track_alvar/src/kinect_filtering.cpp +++ b/ar_track_alvar/src/kinect_filtering.cpp @@ -29,239 +29,239 @@ * */ - - //#include #include #include -#include -#include +// #include +#include +#include namespace ar_track_alvar { +namespace gm = geometry_msgs; - namespace gm=geometry_msgs; +using std::cerr; +using std::endl; +using std::ostream; +using std::vector; - using std::vector; - using std::cerr; - using std::endl; - using std::ostream; +// Distance threshold for plane fitting: how far are points +// allowed to be off the plane? +const double distance_threshold_ = 0.005; - // Distance threshold for plane fitting: how far are points - // allowed to be off the plane? 
- const double distance_threshold_ = 0.005; +PlaneFitResult fitPlane(ARCloud::ConstPtr cloud) +{ + PlaneFitResult res; + pcl::PointIndices::Ptr inliers = boost::make_shared<pcl::PointIndices>(); - PlaneFitResult fitPlane (ARCloud::ConstPtr cloud) - { - PlaneFitResult res; - pcl::PointIndices::Ptr inliers=boost::make_shared<pcl::PointIndices>(); - - pcl::SACSegmentation<ARPoint> seg; - seg.setOptimizeCoefficients(true); - seg.setModelType(pcl::SACMODEL_PLANE); - seg.setMethodType(pcl::SAC_RANSAC); - seg.setDistanceThreshold(distance_threshold_); - - seg.setInputCloud(cloud); - seg.segment(*inliers, res.coeffs); - - pcl::ExtractIndices<ARPoint> extracter; - extracter.setInputCloud(cloud); - extracter.setIndices(inliers); - extracter.setNegative(false); - extracter.filter(*res.inliers); - - return res; - } + pcl::SACSegmentation<ARPoint> seg; + seg.setOptimizeCoefficients(true); + seg.setModelType(pcl::SACMODEL_PLANE); + seg.setMethodType(pcl::SAC_RANSAC); + seg.setDistanceThreshold(distance_threshold_); - ARCloud::Ptr filterCloud (const ARCloud& cloud, const vector<cv::Point, Eigen::aligned_allocator<cv::Point> >& pixels) - { - ARCloud::Ptr out(new ARCloud()); - //ROS_INFO(" Filtering %zu pixels", pixels.size()); - //for (const cv::Point& p : pixels) - for(size_t i=0; i<pixels.size(); i++) - { - const cv::Point& p = pixels[i]; - const ARPoint& pt = cloud(p.x, p.y); - if (std::isnan(pt.x) || std::isnan(pt.y) || std::isnan(pt.z)) - { - //ROS_INFO(" Skipping (%.4f, %.4f, %.4f)", pt.x, pt.y, pt.z); - } - else - out->points.push_back(pt); - } - return out; - } + seg.setInputCloud(cloud); + seg.segment(*inliers, res.coeffs); - gm::Point centroid (const ARCloud& points) - { - gm::Point sum; - sum.x = 0; - sum.y = 0; - sum.z = 0; - //for (const Point& p : points) - for(size_t i=0; i<points.size(); i++) - { - sum.x += points[i].x; - sum.y += points[i].y; - sum.z += points[i].z; - } - - gm::Point center; - const size_t n = points.size(); - center.x = sum.x/n; - center.y = sum.y/n; - center.z = sum.z/n; - return center; - } + pcl::ExtractIndices<ARPoint> extracter; + extracter.setInputCloud(cloud); + extracter.setIndices(inliers); + extracter.setNegative(false); + extracter.filter(*res.inliers); - // Helper function to construct a geometry_msgs::Quaternion - inline - gm::Quaternion makeQuaternion (double x, double y, double z, double w) - { - gm::Quaternion q; - q.x = x; - q.y = y; - q.z = z; - q.w = w; - return q; - } + return res; +} - // Extract and normalize plane coefficients - int getCoeffs (const pcl::ModelCoefficients& coeffs, double* a, double* b, - double* c, double* d) +ARCloud::Ptr filterCloud( + const ARCloud& cloud, + const vector<cv::Point, Eigen::aligned_allocator<cv::Point> >& pixels) +{ + ARCloud::Ptr out(new ARCloud()); + // ROS_INFO(" Filtering %zu pixels", pixels.size()); + // for (const cv::Point& p : pixels) + for (size_t i = 0; i < pixels.size(); i++) { - if(coeffs.values.size() != 4) - return -1; - const double s = coeffs.values[0]*coeffs.values[0] + - coeffs.values[1]*coeffs.values[1] + coeffs.values[2]*coeffs.values[2]; - if(fabs(s) < 1e-6) - return -1; - *a = coeffs.values[0]/s; - *b = coeffs.values[1]/s; - *c = coeffs.values[2]/s; - *d = coeffs.values[3]/s; - return 0; + const cv::Point& p = pixels[i]; + const ARPoint& pt = cloud(p.x, p.y); + if (std::isnan(pt.x) || std::isnan(pt.y) || std::isnan(pt.z)) + { + // ROS_INFO(" Skipping (%.4f, %.4f, %.4f)", pt.x, pt.y, pt.z); + } + else + out->points.push_back(pt); } + return out; +} - // Project point onto plane - tf::Vector3 project (const ARPoint& p, const double a, const double b, - const double c, const double d) +gm::msg::Point centroid(const ARCloud& points) +{ + gm::msg::Point sum; + sum.x = 0; + sum.y = 0; + sum.z = 0; + // for (const Point& p : points) + for (size_t i = 0; i < points.size(); i++) { - const double t = a*p.x + b*p.y + c*p.z + d; - return tf::Vector3(p.x-t*a, p.y-t*b, p.z-t*c); + sum.x += points[i].x; + sum.y += points[i].y; + sum.z += points[i].z; } - ostream& operator<< (ostream& str, const tf::Matrix3x3& m) - { - str << "[" << m[0][0] << ", " << m[0][1] << ", " << m[0][2] << "; " - << m[1][0] << ", " << m[1][1] << ", " << m[1][2] << "; " - << m[2][0] << ", " << m[2][1]
<< ", " << m[2][2] << "]"; - return str; - } + gm::msg::Point center; + const size_t n = points.size(); + center.x = sum.x / n; + center.y = sum.y / n; + center.z = sum.z / n; + return center; +} - ostream& operator<< (ostream& str, const tf::Quaternion& q) - { - str << "[(" << q.x() << ", " << q.y() << ", " << q.z() << - "), " << q.w() << "]"; - return str; - } +// Helper function to construct a geometry_msgs::Quaternion +inline gm::msg::Quaternion makeQuaternion(double x, double y, double z, double w) +{ + gm::msg::Quaternion q; + q.x = x; + q.y = y; + q.z = z; + q.w = w; + return q; +} - ostream& operator<< (ostream& str, const tf::Vector3& v) - { - str << "(" << v.x() << ", " << v.y() << ", " << v.z() << ")"; - return str; - } +// Extract and normalize plane coefficients +int getCoeffs(const pcl::ModelCoefficients& coeffs, double* a, double* b, + double* c, double* d) +{ + if (coeffs.values.size() != 4) + return -1; + const double s = coeffs.values[0] * coeffs.values[0] + + coeffs.values[1] * coeffs.values[1] + + coeffs.values[2] * coeffs.values[2]; + if (fabs(s) < 1e-6) + return -1; + *a = coeffs.values[0] / s; + *b = coeffs.values[1] / s; + *c = coeffs.values[2] / s; + *d = coeffs.values[3] / s; + return 0; +} + +// Project point onto plane +tf2::Vector3 project(const ARPoint& p, const double a, const double b, + const double c, const double d) +{ + const double t = a * p.x + b * p.y + c * p.z + d; + return tf2::Vector3(p.x - t * a, p.y - t * b, p.z - t * c); +} + +ostream& operator<<(ostream& str, const tf2::Matrix3x3& m) +{ + str << "[" << m[0][0] << ", " << m[0][1] << ", " << m[0][2] << "; " << m[1][0] + << ", " << m[1][1] << ", " << m[1][2] << "; " << m[2][0] << ", " + << m[2][1] << ", " << m[2][2] << "]"; + return str; +} + +ostream& operator<<(ostream& str, const tf2::Quaternion& q) +{ + str << "[(" << q.x() << ", " << q.y() << ", " << q.z() << "), " << q.w() + << "]"; + return str; +} + +ostream& operator<<(ostream& str, const tf2::Vector3& v) +{ + str << "(" << v.x() << ", " << v.y() << ", " << v.z() << ")"; + return str; +} + +int extractFrame(const pcl::ModelCoefficients& coeffs, const ARPoint& p1, + const ARPoint& p2, const ARPoint& p3, const ARPoint& p4, + tf2::Matrix3x3& retmat) +{ + // Get plane coeffs and project points onto the plane + double a = 0, b = 0, c = 0, d = 0; + if (getCoeffs(coeffs, &a, &b, &c, &d) < 0) + return -1; - int extractFrame (const pcl::ModelCoefficients& coeffs, - const ARPoint& p1, const ARPoint& p2, - const ARPoint& p3, const ARPoint& p4, - tf::Matrix3x3 &retmat) + const tf2::Vector3 q1 = project(p1, a, b, c, d); + const tf2::Vector3 q2 = project(p2, a, b, c, d); + const tf2::Vector3 q3 = project(p3, a, b, c, d); + const tf2::Vector3 q4 = project(p4, a, b, c, d); + + // Make sure points aren't the same so things are well-defined + if ((q2 - q1).length() < 1e-3) + return -1; + + // (inverse) matrix with the given properties + const tf2::Vector3 v = (q2 - q1).normalized(); + const tf2::Vector3 n(a, b, c); + const tf2::Vector3 w = -v.cross(n); + tf2::Matrix3x3 m(v[0], v[1], v[2], w[0], w[1], w[2], n[0], n[1], n[2]); + + // Possibly flip things based on third point + const tf2::Vector3 diff = (q4 - q3).normalized(); + // ROS_INFO_STREAM("w = " << w << " and d = " << diff); + if (w.dot(diff) < 0) { - // Get plane coeffs and project points onto the plane - double a=0, b=0, c=0, d=0; - if(getCoeffs(coeffs, &a, &b, &c, &d) < 0) - return -1; - - const tf::Vector3 q1 = project(p1, a, b, c, d); - const tf::Vector3 q2 = project(p2, a, b, c, d); - const 
tf::Vector3 q3 = project(p3, a, b, c, d); - const tf::Vector3 q4 = project(p4, a, b, c, d); - - // Make sure points aren't the same so things are well-defined - if((q2-q1).length() < 1e-3) - return -1; - - // (inverse) matrix with the given properties - const tf::Vector3 v = (q2-q1).normalized(); - const tf::Vector3 n(a, b, c); - const tf::Vector3 w = -v.cross(n); - tf::Matrix3x3 m(v[0], v[1], v[2], w[0], w[1], w[2], n[0], n[1], n[2]); - - // Possibly flip things based on third point - const tf::Vector3 diff = (q4-q3).normalized(); - //ROS_INFO_STREAM("w = " << w << " and d = " << diff); - if (w.dot(diff)<0) - { - //ROS_INFO_STREAM("Flipping normal based on p3. Current value: " << m); - m[1] = -m[1]; - m[2] = -m[2]; - //ROS_INFO_STREAM("New value: " << m); - } - - // Invert and return - retmat = m.inverse(); - //cerr << "Frame is " << retmat << endl; - return 0; + // ROS_INFO_STREAM("Flipping normal based on p3. Current value: " << m); + m[1] = -m[1]; + m[2] = -m[2]; + // ROS_INFO_STREAM("New value: " << m); } + // Invert and return + retmat = m.inverse(); + // cerr << "Frame is " << retmat << endl; + return 0; +} + +int getQuaternion(const tf2::Matrix3x3& m, tf2::Quaternion& retQ) +{ + if (m.determinant() <= 0) + return -1; + + // tf2Scalar y=0, p=0, r=0; + // m.getEulerZYX(y, p, r); + // retQ.setEulerZYX(y, p, r); - int getQuaternion (const tf::Matrix3x3& m, tf::Quaternion &retQ) + // Use Eigen for this part instead, because the ROS version of bullet appears + // to have a bug + Eigen::Matrix3f eig_m; + for (int i = 0; i < 3; i++) { - if(m.determinant() <= 0) - return -1; - - //tfScalar y=0, p=0, r=0; - //m.getEulerZYX(y, p, r); - //retQ.setEulerZYX(y, p, r); - - //Use Eigen for this part instead, because the ROS version of bullet appears to have a bug - Eigen::Matrix3f eig_m; - for(int i=0; i<3; i++){ - for(int j=0; j<3; j++){ - eig_m(i,j) = m[i][j]; - } + for (int j = 0; j < 3; j++) + { + eig_m(i, j) = m[i][j]; } - Eigen::Quaternion<float> eig_quat(eig_m); - - // Translate back to bullet - tfScalar ex = eig_quat.x(); - tfScalar ey = eig_quat.y(); - tfScalar ez = eig_quat.z(); - tfScalar ew = eig_quat.w(); - tf::Quaternion quat(ex,ey,ez,ew); - retQ = quat.normalized(); - - return 0; } + Eigen::Quaternion<float> eig_quat(eig_m); + // Translate back to bullet + tf2Scalar ex = eig_quat.x(); + tf2Scalar ey = eig_quat.y(); + tf2Scalar ez = eig_quat.z(); + tf2Scalar ew = eig_quat.w(); + tf2::Quaternion quat(ex, ey, ez, ew); + retQ = quat.normalized(); - int extractOrientation (const pcl::ModelCoefficients& coeffs, - const ARPoint& p1, const ARPoint& p2, - const ARPoint& p3, const ARPoint& p4, - gm::Quaternion &retQ) - { - tf::Matrix3x3 m; - if(extractFrame(coeffs, p1, p2, p3, p4, m) < 0) - return -1; - tf::Quaternion q; - if(getQuaternion(m,q) < 0) - return -1; - retQ.x = q.x(); - retQ.y = q.y(); - retQ.z = q.z(); - retQ.w = q.w(); - return 0; - } + return 0; +} + +int extractOrientation(const pcl::ModelCoefficients& coeffs, const ARPoint& p1, + const ARPoint& p2, const ARPoint& p3, const ARPoint& p4, + gm::msg::Quaternion& retQ) +{ + tf2::Matrix3x3 m; + if (extractFrame(coeffs, p1, p2, p3, p4, m) < 0) + return -1; + tf2::Quaternion q; + if (getQuaternion(m, q) < 0) + return -1; + retQ.x = q.x(); + retQ.y = q.y(); + retQ.z = q.z(); + retQ.w = q.w(); + return 0; +} -} // namespace +} // namespace ar_track_alvar diff --git a/ar_track_alvar/src/medianFilter.cpp b/ar_track_alvar/src/medianFilter.cpp index ae78636..f667f04 100644 --- a/ar_track_alvar/src/medianFilter.cpp +++ 
b/ar_track_alvar/src/medianFilter.cpp @@ -29,8 +29,8 @@ */ /** - * \file - * + * \file + * * N-dimensional median filter for marker poses * * \author Scott Niekum @@ -40,51 +40,68 @@ namespace ar_track_alvar { - using namespace alvar; +using namespace alvar; - MedianFilter::MedianFilter(int n){ - median_n = n; - median_ind = 0; - median_init = false; - median_poses = new Pose[median_n]; - } +MedianFilter::MedianFilter(int n) +{ + median_n = n; + median_ind = 0; + median_init = false; + median_poses = new Pose[median_n]; +} - void MedianFilter::addPose(const Pose &new_pose){ - median_poses[median_ind] = new_pose; - median_ind = (median_ind+1) % median_n; - } +void MedianFilter::addPose(const Pose& new_pose) +{ + median_poses[median_ind] = new_pose; + median_ind = (median_ind + 1) % median_n; +} - void MedianFilter::getMedian(Pose &ret_pose){ - if(!median_init){ - if(median_ind == median_n-1) - median_init = true; - ret_pose = median_poses[median_ind-1]; - } +void MedianFilter::getMedian(Pose& ret_pose) +{ + if (!median_init) + { + if (median_ind == median_n - 1) + median_init = true; + ret_pose = median_poses[median_ind - 1]; + } - else{ - double min_dist = 0; - int min_ind = 0; - for(int i=0; i -namespace alvar { -namespace plugins { - +namespace alvar +{ +namespace plugins +{ CaptureCmu::CaptureCmu(const CaptureDevice captureDevice) - : Capture(captureDevice) - , mCamera(new C1394Camera()) - , mChannels(-1) - , mReturnFrame(NULL) + : Capture(captureDevice) + , mCamera(new C1394Camera()) + , mChannels(-1) + , mReturnFrame(NULL) { } CaptureCmu::~CaptureCmu() { - stop(); - delete mCamera; + stop(); + delete mCamera; } bool CaptureCmu::start() { - if (isCapturing()) { - return isCapturing(); - } - - if (mCamera->CheckLink() != CAM_SUCCESS) { - return false; - } - - int numberCameras = mCamera->GetNumberCameras(); - bool cameraSelected = false; - for (int i = 0; i < numberCameras; i++) { - mCamera->SelectCamera(i); - LARGE_INTEGER uniqueId; - mCamera->GetCameraUniqueID(&uniqueId); - std::stringstream convert; - convert << std::hex << uniqueId.HighPart << uniqueId.LowPart; - if (captureDevice().id().compare(convert.str()) == 0) { - cameraSelected = true; - break; - } - } - - if (!cameraSelected) { - return false; - } - - if (mCamera->InitCamera(true) != CAM_SUCCESS) { - return false; - } - - // TODO: this needs to be parameterized somehow - if (mCamera->HasVideoMode(2, 4)) { // 1600x1200rgb - if (mCamera->SetVideoFormat(2) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(4) != CAM_SUCCESS) return false; - mChannels = 3; - } - else if (mCamera->HasVideoMode(2, 1)) { // 1280x960rgb - if (mCamera->SetVideoFormat(2) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(1) != CAM_SUCCESS) return false; - mChannels = 3; - } - else if (mCamera->HasVideoMode(1, 4)) { // 1024x768rgb - if (mCamera->SetVideoFormat(1) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(4) != CAM_SUCCESS) return false; - mChannels = 3; - } - else if (mCamera->HasVideoMode(1, 1)) { // 800x600rgb - if (mCamera->SetVideoFormat(1) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(1) != CAM_SUCCESS) return false; - mChannels = 3; + if (isCapturing()) + { + return isCapturing(); + } + + if (mCamera->CheckLink() != CAM_SUCCESS) + { + return false; + } + + int numberCameras = mCamera->GetNumberCameras(); + bool cameraSelected = false; + for (int i = 0; i < numberCameras; i++) + { + mCamera->SelectCamera(i); + LARGE_INTEGER uniqueId; + mCamera->GetCameraUniqueID(&uniqueId); + std::stringstream convert; + 
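The camera-selection loop continuing here matches a device by rendering its 64-bit unique ID as a hex string and comparing it against `CaptureDevice::id()`; `enumerateDevices()` further down builds the same string. A hedged sketch of just that scheme, with the parameter types simplified from `LARGE_INTEGER`'s `HighPart`/`LowPart`:

```
#include <sstream>
#include <string>

// Build the device-id string the way CaptureCmu::start() and
// enumerateDevices() do: both 32-bit halves of the camera's unique
// ID are streamed in hex and concatenated.
std::string makeCameraId(unsigned long highPart, unsigned long lowPart)
{
  std::stringstream convert;
  convert << std::hex << highPart << lowPart;
  return convert.str();  // e.g. high=0x1, low=0x2A -> "12a"
}
```

Note that the low half is not zero-padded, so in principle two distinct IDs could map to the same string; the port preserves the original behavior.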
convert << std::hex << uniqueId.HighPart << uniqueId.LowPart; + if (captureDevice().id().compare(convert.str()) == 0) + { + cameraSelected = true; + break; } - else if (mCamera->HasVideoMode(0, 4)) { // 640x480rgb - if (mCamera->SetVideoFormat(0) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(4) != CAM_SUCCESS) return false; - mChannels = 3; + } + + if (!cameraSelected) + { + return false; + } + + if (mCamera->InitCamera(true) != CAM_SUCCESS) + { + return false; + } + + // TODO: this needs to be parameterized somehow + if (mCamera->HasVideoMode(2, 4)) + { // 1600x1200rgb + if (mCamera->SetVideoFormat(2) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(4) != CAM_SUCCESS) + return false; + mChannels = 3; + } + else if (mCamera->HasVideoMode(2, 1)) + { // 1280x960rgb + if (mCamera->SetVideoFormat(2) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(1) != CAM_SUCCESS) + return false; + mChannels = 3; + } + else if (mCamera->HasVideoMode(1, 4)) + { // 1024x768rgb + if (mCamera->SetVideoFormat(1) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(4) != CAM_SUCCESS) + return false; + mChannels = 3; + } + else if (mCamera->HasVideoMode(1, 1)) + { // 800x600rgb + if (mCamera->SetVideoFormat(1) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(1) != CAM_SUCCESS) + return false; + mChannels = 3; + } + else if (mCamera->HasVideoMode(0, 4)) + { // 640x480rgb + if (mCamera->SetVideoFormat(0) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(4) != CAM_SUCCESS) + return false; + mChannels = 3; /* // TODO: also support YUV422 } else if (mCamera->HasVideoMode(2, 3)) { // 1600x1200yuv422 @@ -131,125 +153,159 @@ bool CaptureCmu::start() if (mCamera->SetVideoMode(1) != CAM_SUCCESS) return false; mChannels = 2; */ - } - else if (mCamera->HasVideoMode(2, 5)) { // 1600x1200mono - if (mCamera->SetVideoFormat(2) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(5) != CAM_SUCCESS) return false; - mChannels = 1; - } - else if (mCamera->HasVideoMode(2, 2)) { // 1280x960mono - if (mCamera->SetVideoFormat(2) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(2) != CAM_SUCCESS) return false; - mChannels = 1; - } - else if (mCamera->HasVideoMode(1, 5)) { // 1024x768mono - if (mCamera->SetVideoFormat(1) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(5) != CAM_SUCCESS) return false; - mChannels = 1; - } - else if (mCamera->HasVideoMode(1, 2)) { // 800x600mono - if (mCamera->SetVideoFormat(1) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(2) != CAM_SUCCESS) return false; - mChannels = 1; - } - else if (mCamera->HasVideoMode(0, 5)) { // 640x480mono - if (mCamera->SetVideoFormat(0) != CAM_SUCCESS) return false; - if (mCamera->SetVideoMode(5) != CAM_SUCCESS) return false; - mChannels = 1; - } - else { - return false; - } - - mCamera->GetVideoFrameDimensions(&mXResolution, &mYResolution); - mReturnFrame = cvCreateImage(cvSize(mXResolution, mYResolution), IPL_DEPTH_8U, mChannels); - mCamera->StartImageAcquisitionEx(6, 1, ACQ_START_VIDEO_STREAM); - mIsCapturing = true; - - return isCapturing(); + } + else if (mCamera->HasVideoMode(2, 5)) + { // 1600x1200mono + if (mCamera->SetVideoFormat(2) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(5) != CAM_SUCCESS) + return false; + mChannels = 1; + } + else if (mCamera->HasVideoMode(2, 2)) + { // 1280x960mono + if (mCamera->SetVideoFormat(2) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(2) != CAM_SUCCESS) + return false; + mChannels = 1; + } + else if 
(mCamera->HasVideoMode(1, 5)) + { // 1024x768mono + if (mCamera->SetVideoFormat(1) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(5) != CAM_SUCCESS) + return false; + mChannels = 1; + } + else if (mCamera->HasVideoMode(1, 2)) + { // 800x600mono + if (mCamera->SetVideoFormat(1) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(2) != CAM_SUCCESS) + return false; + mChannels = 1; + } + else if (mCamera->HasVideoMode(0, 5)) + { // 640x480mono + if (mCamera->SetVideoFormat(0) != CAM_SUCCESS) + return false; + if (mCamera->SetVideoMode(5) != CAM_SUCCESS) + return false; + mChannels = 1; + } + else + { + return false; + } + + mCamera->GetVideoFrameDimensions(&mXResolution, &mYResolution); + mReturnFrame = cvCreateImage(cvSize(mXResolution, mYResolution), IPL_DEPTH_8U, + mChannels); + mCamera->StartImageAcquisitionEx(6, 1, ACQ_START_VIDEO_STREAM); + mIsCapturing = true; + + return isCapturing(); } void CaptureCmu::stop() { - if (isCapturing()) { - mCamera->StopImageAcquisition(); - cvReleaseImage(&mReturnFrame); - mIsCapturing = false; - } + if (isCapturing()) + { + mCamera->StopImageAcquisition(); + cvReleaseImage(&mReturnFrame); + mIsCapturing = false; + } } -IplImage *CaptureCmu::captureImage() +IplImage* CaptureCmu::captureImage() { - if (!isCapturing()) { - return NULL; - } - - if (mCamera->AcquireImageEx(false, NULL) == CAM_SUCCESS) { - unsigned long length = mReturnFrame->widthStep * mYResolution; - memcpy(mReturnFrame->imageData, mCamera->GetRawData(&length), length); - } - return mReturnFrame; + if (!isCapturing()) + { + return NULL; + } + + if (mCamera->AcquireImageEx(false, NULL) == CAM_SUCCESS) + { + unsigned long length = mReturnFrame->widthStep * mYResolution; + memcpy(mReturnFrame->imageData, mCamera->GetRawData(&length), length); + } + return mReturnFrame; } bool CaptureCmu::showSettingsDialog() { - CameraControlDialog(NULL, mCamera, true); - return true; + CameraControlDialog(NULL, mCamera, true); + return true; } std::string CaptureCmu::SerializeId() { - return "CaptureCmu"; + return "CaptureCmu"; } -bool CaptureCmu::Serialize(Serialization *serialization) +bool CaptureCmu::Serialize(Serialization* serialization) { - if (!mCamera) { - return false; - } - - unsigned short value; - if (serialization->IsInput()) { - if (!serialization->Serialize(value, "Gain")) return false; - mCamera->GetCameraControl(FEATURE_GAIN)->SetAutoMode(false); - mCamera->GetCameraControl(FEATURE_GAIN)->SetValue(value); - - if (!serialization->Serialize(value, "AutoExposure")) return false; - mCamera->GetCameraControl(FEATURE_AUTO_EXPOSURE)->SetAutoMode(false); - mCamera->GetCameraControl(FEATURE_AUTO_EXPOSURE)->SetValue(value); - - if (!serialization->Serialize(value, "Shutter")) return false; - mCamera->GetCameraControl(FEATURE_SHUTTER)->SetAutoMode(false); - mCamera->GetCameraControl(FEATURE_SHUTTER)->SetValue(value); - - if (!serialization->Serialize(value, "Brightness")) return false; - mCamera->GetCameraControl(FEATURE_BRIGHTNESS)->SetAutoMode(false); - mCamera->GetCameraControl(FEATURE_BRIGHTNESS)->SetValue(value); - - if (!serialization->Serialize(value, "Gamma")) return false; - mCamera->GetCameraControl(FEATURE_GAMMA)->SetAutoMode(false); - mCamera->GetCameraControl(FEATURE_GAMMA)->SetValue(value); - } else { - mCamera->GetCameraControl(FEATURE_GAIN)->GetValue(&value); - if (!serialization->Serialize(value, "Gain")) return false; - - mCamera->GetCameraControl(FEATURE_AUTO_EXPOSURE)->GetValue(&value); - if (!serialization->Serialize(value, "AutoExposure")) return false; - - 
mCamera->GetCameraControl(FEATURE_SHUTTER)->GetValue(&value); - if (!serialization->Serialize(value, "Shutter")) return false; - - mCamera->GetCameraControl(FEATURE_BRIGHTNESS)->GetValue(&value); - if (!serialization->Serialize(value, "Brightness")) return false; - - mCamera->GetCameraControl(FEATURE_GAMMA)->GetValue(&value); - if (!serialization->Serialize(value, "Gamma")) return false; - } - return true; + if (!mCamera) + { + return false; + } + + unsigned short value; + if (serialization->IsInput()) + { + if (!serialization->Serialize(value, "Gain")) + return false; + mCamera->GetCameraControl(FEATURE_GAIN)->SetAutoMode(false); + mCamera->GetCameraControl(FEATURE_GAIN)->SetValue(value); + + if (!serialization->Serialize(value, "AutoExposure")) + return false; + mCamera->GetCameraControl(FEATURE_AUTO_EXPOSURE)->SetAutoMode(false); + mCamera->GetCameraControl(FEATURE_AUTO_EXPOSURE)->SetValue(value); + + if (!serialization->Serialize(value, "Shutter")) + return false; + mCamera->GetCameraControl(FEATURE_SHUTTER)->SetAutoMode(false); + mCamera->GetCameraControl(FEATURE_SHUTTER)->SetValue(value); + + if (!serialization->Serialize(value, "Brightness")) + return false; + mCamera->GetCameraControl(FEATURE_BRIGHTNESS)->SetAutoMode(false); + mCamera->GetCameraControl(FEATURE_BRIGHTNESS)->SetValue(value); + + if (!serialization->Serialize(value, "Gamma")) + return false; + mCamera->GetCameraControl(FEATURE_GAMMA)->SetAutoMode(false); + mCamera->GetCameraControl(FEATURE_GAMMA)->SetValue(value); + } + else + { + mCamera->GetCameraControl(FEATURE_GAIN)->GetValue(&value); + if (!serialization->Serialize(value, "Gain")) + return false; + + mCamera->GetCameraControl(FEATURE_AUTO_EXPOSURE)->GetValue(&value); + if (!serialization->Serialize(value, "AutoExposure")) + return false; + + mCamera->GetCameraControl(FEATURE_SHUTTER)->GetValue(&value); + if (!serialization->Serialize(value, "Shutter")) + return false; + + mCamera->GetCameraControl(FEATURE_BRIGHTNESS)->GetValue(&value); + if (!serialization->Serialize(value, "Brightness")) + return false; + + mCamera->GetCameraControl(FEATURE_GAMMA)->GetValue(&value); + if (!serialization->Serialize(value, "Gamma")) + return false; + } + return true; } -CapturePluginCmu::CapturePluginCmu(const std::string &captureType) - : CapturePlugin(captureType) +CapturePluginCmu::CapturePluginCmu(const std::string& captureType) + : CapturePlugin(captureType) { } @@ -259,42 +315,45 @@ CapturePluginCmu::~CapturePluginCmu() CapturePlugin::CaptureDeviceVector CapturePluginCmu::enumerateDevices() { - CaptureDeviceVector devices; - - C1394Camera camera; - if (camera.CheckLink() != CAM_SUCCESS) { - return devices; - } - - int numberCameras = camera.GetNumberCameras(); - for (int i = 0; i < numberCameras; i++) { - camera.SelectCamera(i); - LARGE_INTEGER uniqueId; - camera.GetCameraUniqueID(&uniqueId); - std::stringstream convert; - convert << std::hex << uniqueId.HighPart << uniqueId.LowPart; - std::stringstream description; - char buffer[500]; - camera.GetCameraVendor(buffer, 500); - description << buffer << " "; - camera.GetCameraName(buffer, 500); - description << buffer; - CaptureDevice captureDevice(mCaptureType, convert.str(), description.str()); - devices.push_back(captureDevice); - } + CaptureDeviceVector devices; + C1394Camera camera; + if (camera.CheckLink() != CAM_SUCCESS) + { return devices; + } + + int numberCameras = camera.GetNumberCameras(); + for (int i = 0; i < numberCameras; i++) + { + camera.SelectCamera(i); + LARGE_INTEGER uniqueId; + 
camera.GetCameraUniqueID(&uniqueId); + std::stringstream convert; + convert << std::hex << uniqueId.HighPart << uniqueId.LowPart; + std::stringstream description; + char buffer[500]; + camera.GetCameraVendor(buffer, 500); + description << buffer << " "; + camera.GetCameraName(buffer, 500); + description << buffer; + CaptureDevice captureDevice(mCaptureType, convert.str(), description.str()); + devices.push_back(captureDevice); + } + + return devices; } -Capture *CapturePluginCmu::createCapture(const CaptureDevice captureDevice) +Capture* CapturePluginCmu::createCapture(const CaptureDevice captureDevice) { - return new CaptureCmu(captureDevice); + return new CaptureCmu(captureDevice); } -void registerPlugin(const std::string &captureType, alvar::CapturePlugin *&capturePlugin) +void registerPlugin(const std::string& captureType, + alvar::CapturePlugin*& capturePlugin) { - capturePlugin = new CapturePluginCmu(captureType); + capturePlugin = new CapturePluginCmu(captureType); } -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar diff --git a/ar_track_alvar/src/platform/capture_plugin_cmu/CapturePluginCmu.h b/ar_track_alvar/src/platform/capture_plugin_cmu/CapturePluginCmu.h index 841cfee..b3f68dc 100644 --- a/ar_track_alvar/src/platform/capture_plugin_cmu/CapturePluginCmu.h +++ b/ar_track_alvar/src/platform/capture_plugin_cmu/CapturePluginCmu.h @@ -31,13 +31,13 @@ */ #ifdef WIN32 - #ifdef ALVAR_Capture_Plugin_CMU_BUILD - #define ALVAR_CAPTURE_PLUGIN_CMU_EXPORT __declspec(dllexport) - #else - #define ALVAR_CAPTURE_PLUGIN_CMU_EXPORT __declspec(dllimport) - #endif +#ifdef ALVAR_Capture_Plugin_CMU_BUILD +#define ALVAR_CAPTURE_PLUGIN_CMU_EXPORT __declspec(dllexport) #else - #define ALVAR_CAPTURE_PLUGIN_CMU_EXPORT +#define ALVAR_CAPTURE_PLUGIN_CMU_EXPORT __declspec(dllimport) +#endif +#else +#define ALVAR_CAPTURE_PLUGIN_CMU_EXPORT #endif #include "Capture.h" @@ -45,66 +45,67 @@ #include "1394camera.h" -namespace alvar { - +namespace alvar +{ /** * \brief Dynamically loaded plugins namespace. */ -namespace plugins { - +namespace plugins +{ /** * \brief Implementation of Capture interface for Cmu plugin. */ -class ALVAR_CAPTURE_PLUGIN_CMU_EXPORT CaptureCmu - : public alvar::Capture +class ALVAR_CAPTURE_PLUGIN_CMU_EXPORT CaptureCmu : public alvar::Capture { public: - /** - * \brief Constructor. - * - * \param captureDevice Information of which camera to create. - */ - CaptureCmu(const CaptureDevice captureDevice); - /** - * \brief Destructor. - */ - ~CaptureCmu(); - bool start(); - void stop(); - IplImage *captureImage(); - bool showSettingsDialog(); - std::string SerializeId(); - bool Serialize(Serialization *serialization); + /** + * \brief Constructor. + * + * \param captureDevice Information of which camera to create. + */ + CaptureCmu(const CaptureDevice captureDevice); + /** + * \brief Destructor. + */ + ~CaptureCmu(); + bool start(); + void stop(); + IplImage* captureImage(); + bool showSettingsDialog(); + std::string SerializeId(); + bool Serialize(Serialization* serialization); + private: - C1394Camera *mCamera; - int mChannels; - IplImage *mReturnFrame; + C1394Camera* mCamera; + int mChannels; + IplImage* mReturnFrame; }; /** * \brief Implementation of CapturePlugin interface for Cmu plugin. */ class ALVAR_CAPTURE_PLUGIN_CMU_EXPORT CapturePluginCmu - : public alvar::CapturePlugin + : public alvar::CapturePlugin { public: - /** - * \brief Constructor. - * - * \param captureType A unique identifier for the capture plugin. 
- */ - CapturePluginCmu(const std::string &captureType); - /** - * \brief Destructor. - */ - ~CapturePluginCmu(); - CaptureDeviceVector enumerateDevices(); - Capture *createCapture(const CaptureDevice captureDevice); + /** + * \brief Constructor. + * + * \param captureType A unique identifier for the capture plugin. + */ + CapturePluginCmu(const std::string& captureType); + /** + * \brief Destructor. + */ + ~CapturePluginCmu(); + CaptureDeviceVector enumerateDevices(); + Capture* createCapture(const CaptureDevice captureDevice); }; -extern "C" ALVAR_CAPTURE_PLUGIN_CMU_EXPORT void registerPlugin(const std::string &captureType, alvar::CapturePlugin *&capturePlugin); +extern "C" ALVAR_CAPTURE_PLUGIN_CMU_EXPORT void registerPlugin( + const std::string& captureType, alvar::CapturePlugin*& capturePlugin); -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar #endif diff --git a/ar_track_alvar/src/platform/capture_plugin_dscapture/CapturePluginDSCapture.cpp b/ar_track_alvar/src/platform/capture_plugin_dscapture/CapturePluginDSCapture.cpp index 3d4daac..d8dd28c 100644 --- a/ar_track_alvar/src/platform/capture_plugin_dscapture/CapturePluginDSCapture.cpp +++ b/ar_track_alvar/src/platform/capture_plugin_dscapture/CapturePluginDSCapture.cpp @@ -27,19 +27,20 @@ using namespace std; -namespace alvar { -namespace plugins { - +namespace alvar +{ +namespace plugins +{ CaptureDSCapture::CaptureDSCapture(const CaptureDevice captureDevice) - : Capture(captureDevice) - , sampler(this) - , m_pDSCapture(NULL) - , m_nBpp(0) - , m_nVideo_x_res(-1) - , m_nVideo_y_res(-1) - , imgBuffer(NULL) - , imgBufferForCallback(NULL) - , mReturnFrame(NULL) + : Capture(captureDevice) + , sampler(this) + , m_pDSCapture(NULL) + , m_nBpp(0) + , m_nVideo_x_res(-1) + , m_nVideo_y_res(-1) + , imgBuffer(NULL) + , imgBufferForCallback(NULL) + , mReturnFrame(NULL) { InitializeCriticalSection(&crit); next_event = CreateEvent(NULL, FALSE, FALSE, NULL); @@ -49,9 +50,10 @@ CaptureDSCapture::CaptureDSCapture(const CaptureDevice captureDevice) CaptureDSCapture::~CaptureDSCapture() { stop(); - if (m_pDSCapture) { + if (m_pDSCapture) + { m_pDSCapture->Stop(); - delete m_pDSCapture; + delete m_pDSCapture; } delete imgBuffer; delete imgBufferForCallback; @@ -60,121 +62,135 @@ CaptureDSCapture::~CaptureDSCapture() bool CaptureDSCapture::start() { - if (m_pDSCapture) { - HRESULT hr = m_pDSCapture->Init(true, false, mCaptureDevice.id().c_str(), NULL); - - if(SUCCEEDED(hr)){ - // Set video grabber media type - AM_MEDIA_TYPE mt; - ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE)); - mt.majortype = MEDIATYPE_Video; - mt.subtype = MEDIASUBTYPE_RGB24; - hr = m_pDSCapture->SetVideoGrabberFormat(&mt); - } - - if(SUCCEEDED(hr)) - hr = m_pDSCapture->ShowVideoCaptureFormatDialog(); - - // We must connect filters before we can get connected media type from grabber(s) - if(SUCCEEDED(hr)) - hr = m_pDSCapture->Connect(); - - if(SUCCEEDED(hr)){ - AM_MEDIA_TYPE mt; - ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE)); - HRESULT hr = m_pDSCapture->GetVideoGrabberFormat(&mt); - if(SUCCEEDED(hr)){ - // Examine the format block. 
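The format-block examination that follows only trusts `mt.pbFormat` after three guards. The same check in isolation, as a hedged Win32/DirectShow-only sketch (the helper name is invented):

```
#include <dshow.h>

// Read the negotiated bit depth from a DirectShow media type, guarding
// exactly the way the code below does: the VIDEOINFOHEADER behind
// pbFormat is only meaningful if the format type, size, and pointer all
// check out. Returns 0 for formats this plugin does not handle.
int bitsPerPixel(const AM_MEDIA_TYPE& mt)
{
  if (mt.formattype == FORMAT_VideoInfo &&
      mt.cbFormat >= sizeof(VIDEOINFOHEADER) && mt.pbFormat != NULL)
  {
    if (mt.subtype == MEDIASUBTYPE_RGB24)
      return 24;
    if (mt.subtype == MEDIASUBTYPE_RGB32)
      return 32;
  }
  return 0;
}
```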
- if ((mt.formattype == FORMAT_VideoInfo) && - (mt.cbFormat >= sizeof(VIDEOINFOHEADER)) && - (mt.pbFormat != NULL) ) - { - if(mt.subtype == MEDIASUBTYPE_RGB24) - m_nBpp = 24; - else if(mt.subtype == MEDIASUBTYPE_RGB32) - m_nBpp = 32; - - VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)mt.pbFormat; - //cout << "Video capture: "<<pVih->bmiHeader.biWidth<<"x"<<pVih->bmiHeader.biHeight<<" "<<pVih->bmiHeader.biBitCount<<" bpp "<<endl; - m_nVideo_x_res = pVih->bmiHeader.biWidth; - m_nVideo_y_res = pVih->bmiHeader.biHeight; - } - } - } - - if(FAILED(hr)){ - return false; - } - - m_pDSCapture->AddVideoCallback(&sampler); - - buffer_size = (int)(m_nVideo_x_res * m_nVideo_y_res * (m_nBpp/8.0)); - imgBuffer = new BYTE[buffer_size]; - imgBufferForCallback = new BYTE[buffer_size]; - mReturnFrame = cvCreateImageHeader(cvSize(m_nVideo_x_res, m_nVideo_y_res), IPL_DEPTH_8U,m_nBpp / 8); - mReturnFrame->imageData = (char*)imgBuffer; + if (SUCCEEDED(hr)) + { + // Set video grabber media type + AM_MEDIA_TYPE mt; + ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE)); + mt.majortype = MEDIATYPE_Video; + mt.subtype = MEDIASUBTYPE_RGB24; + hr = m_pDSCapture->SetVideoGrabberFormat(&mt); } - m_pDSCapture->Start(); - mIsCapturing = true; - return true; + + if (SUCCEEDED(hr)) + hr = m_pDSCapture->ShowVideoCaptureFormatDialog(); + + // We must connect filters before we can get connected media type from + // grabber(s) + if (SUCCEEDED(hr)) + hr = m_pDSCapture->Connect(); + + if (SUCCEEDED(hr)) + { + AM_MEDIA_TYPE mt; + ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE)); + HRESULT hr = m_pDSCapture->GetVideoGrabberFormat(&mt); + if (SUCCEEDED(hr)) + { + // Examine the format block. + if ((mt.formattype == FORMAT_VideoInfo) && + (mt.cbFormat >= sizeof(VIDEOINFOHEADER)) && (mt.pbFormat != NULL)) + { + if (mt.subtype == MEDIASUBTYPE_RGB24) + m_nBpp = 24; + else if (mt.subtype == MEDIASUBTYPE_RGB32) + m_nBpp = 32; + + VIDEOINFOHEADER* pVih = (VIDEOINFOHEADER*)mt.pbFormat; + // cout << "Video capture: + // "<<pVih->bmiHeader.biWidth<<"x"<<pVih->bmiHeader.biHeight<<" + // "<<pVih->bmiHeader.biBitCount<<" bpp "<<endl; + m_nVideo_x_res = pVih->bmiHeader.biWidth; + m_nVideo_y_res = pVih->bmiHeader.biHeight; + } + } + } + + if (FAILED(hr)) + { + return false; + } + + m_pDSCapture->AddVideoCallback(&sampler); + + buffer_size = (int)(m_nVideo_x_res * m_nVideo_y_res * (m_nBpp / 8.0)); + imgBuffer = new BYTE[buffer_size]; + imgBufferForCallback = new BYTE[buffer_size]; + mReturnFrame = cvCreateImageHeader(cvSize(m_nVideo_x_res, m_nVideo_y_res), + IPL_DEPTH_8U, m_nBpp / 8); + mReturnFrame->imageData = (char*)imgBuffer; + } + m_pDSCapture->Start(); + mIsCapturing = true; + return true; } void CaptureDSCapture::stop() { - if (isCapturing()) { - HRESULT hr = m_pDSCapture->Stop(); - mIsCapturing = false; - } + if (isCapturing()) + { + HRESULT hr = m_pDSCapture->Stop(); + mIsCapturing = false; + } } -IplImage *CaptureDSCapture::captureImage() +IplImage* CaptureDSCapture::captureImage() { - if (!isCapturing()) { - return NULL; - } + if (!isCapturing()) + { + return NULL; + } - IplImage *ret = NULL; - if (WaitForSingleObject(next_event, 1000) == WAIT_OBJECT_0) { - EnterCriticalSection(&crit); - ret = mReturnFrame; - ret->origin = 1; - memcpy(imgBuffer,imgBufferForCallback,buffer_size); - LeaveCriticalSection(&crit); - } + IplImage* ret = NULL; + if (WaitForSingleObject(next_event, 1000) == WAIT_OBJECT_0) + { + EnterCriticalSection(&crit); + ret = mReturnFrame; + ret->origin = 1; + memcpy(imgBuffer, imgBufferForCallback, buffer_size); + LeaveCriticalSection(&crit); + }
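`captureImage()` above pairs with `OnVideoSample()` further down: the DirectShow thread fills one buffer under a critical section and signals an event, and the consumer waits up to a second before copying under the same lock. A minimal sketch of that handoff, assuming `crit` has been set up with `InitializeCriticalSection()` and `next_event` created as an auto-reset event (names are illustrative):

```
#include <windows.h>
#include <cstring>

// Two-buffer frame handoff as used by CaptureDSCapture.
struct FrameHandoff
{
  CRITICAL_SECTION crit;
  HANDLE next_event;
  unsigned char* callbackBuf;  // written on the DirectShow thread
  unsigned char* frameBuf;     // handed out to the caller
  unsigned int size;

  // Producer side, called from the video callback.
  void onSample(const unsigned char* data, unsigned long len)
  {
    EnterCriticalSection(&crit);
    if (len <= size)
      memcpy(callbackBuf, data, len);
    SetEvent(next_event);  // wake one waiting consumer
    LeaveCriticalSection(&crit);
  }

  // Consumer side, called from captureImage().
  bool waitAndCopy(unsigned long timeoutMs)
  {
    if (WaitForSingleObject(next_event, timeoutMs) != WAIT_OBJECT_0)
      return false;  // no fresh frame within the timeout
    EnterCriticalSection(&crit);
    memcpy(frameBuf, callbackBuf, size);
    LeaveCriticalSection(&crit);
    return true;
  }
};
```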
return ret; } bool CaptureDSCapture::showSettingsDialog() { - HRESULT hr = m_pDSCapture->ShowVideoCaptureFormatDialog(); - return SUCCEEDED(hr); + HRESULT hr = m_pDSCapture->ShowVideoCaptureFormatDialog(); + return SUCCEEDED(hr); } string CaptureDSCapture::SerializeId() { - return "CaptureDSCapture"; + return "CaptureDSCapture"; } -bool CaptureDSCapture::Serialize(Serialization *serialization) +bool CaptureDSCapture::Serialize(Serialization* serialization) { - return false; + return false; } -void CaptureDSCapture::OnVideoSample(BYTE* pBuffer, DWORD dwDataLen, REFERENCE_TIME t_start) +void CaptureDSCapture::OnVideoSample(BYTE* pBuffer, DWORD dwDataLen, + REFERENCE_TIME t_start) { - EnterCriticalSection(&crit); - if(pBuffer){ - if (dwDataLen <= buffer_size) { - memcpy(imgBufferForCallback,pBuffer,dwDataLen); - } - SetEvent(next_event); - } - LeaveCriticalSection(&crit); + EnterCriticalSection(&crit); + if (pBuffer) + { + if (dwDataLen <= buffer_size) + { + memcpy(imgBufferForCallback, pBuffer, dwDataLen); + } + SetEvent(next_event); + } + LeaveCriticalSection(&crit); } - -CapturePluginDSCapture::CapturePluginDSCapture(const string &captureType) - : CapturePlugin(captureType) +CapturePluginDSCapture::CapturePluginDSCapture(const string& captureType) + : CapturePlugin(captureType) { } @@ -184,26 +200,29 @@ CapturePluginDSCapture::~CapturePluginDSCapture() CapturePlugin::CaptureDeviceVector CapturePluginDSCapture::enumerateDevices() { - CaptureDeviceVector devices; - CDSCapture capture; - device_map* vids = capture.GetVideoCaptureDevices(); - for (device_map::iterator i = vids->begin(); i != vids->end(); ++i) { - CaptureDevice dev("dscapture", i->first, i->second); - devices.push_back(dev); - } - - return devices; + CaptureDeviceVector devices; + CDSCapture capture; + device_map* vids = capture.GetVideoCaptureDevices(); + for (device_map::iterator i = vids->begin(); i != vids->end(); ++i) + { + CaptureDevice dev("dscapture", i->first, i->second); + devices.push_back(dev); + } + + return devices; } -Capture *CapturePluginDSCapture::createCapture(const CaptureDevice captureDevice) +Capture* +CapturePluginDSCapture::createCapture(const CaptureDevice captureDevice) { - return new CaptureDSCapture(captureDevice); + return new CaptureDSCapture(captureDevice); } -void registerPlugin(const string &captureType, alvar::CapturePlugin *&capturePlugin) +void registerPlugin(const string& captureType, + alvar::CapturePlugin*& capturePlugin) { - capturePlugin = new CapturePluginDSCapture(captureType); + capturePlugin = new CapturePluginDSCapture(captureType); } -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar diff --git a/ar_track_alvar/src/platform/capture_plugin_dscapture/CapturePluginDSCapture.h b/ar_track_alvar/src/platform/capture_plugin_dscapture/CapturePluginDSCapture.h index e35697d..abd68c5 100644 --- a/ar_track_alvar/src/platform/capture_plugin_dscapture/CapturePluginDSCapture.h +++ b/ar_track_alvar/src/platform/capture_plugin_dscapture/CapturePluginDSCapture.h @@ -34,13 +34,13 @@ */ #ifdef WIN32 - #ifdef ALVAR_Capture_Plugin_DSCapture_BUILD - #define ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT __declspec(dllexport) - #else - #define ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT __declspec(dllimport) - #endif +#ifdef ALVAR_Capture_Plugin_DSCapture_BUILD +#define ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT __declspec(dllexport) #else - #define ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT +#define ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT __declspec(dllimport) +#endif +#else +#define 
ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT #endif #include "Capture.h" @@ -48,13 +48,13 @@ #include "dscapture.h" -namespace alvar { - +namespace alvar +{ /** * \brief Dynamically loaded plugins namespace. */ -namespace plugins { - +namespace plugins +{ /** * \brief Implementation of Capture interface for DSCapture plugin. * @@ -62,52 +62,57 @@ namespace plugins { * the build by default. */ class ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT CaptureDSCapture - : public alvar::Capture + : public alvar::Capture { - class VideoSampler : public IVideoCallback { - public: - CaptureDSCapture *parent; - VideoSampler(CaptureDSCapture *_parent) : parent(_parent) {} - void OnVideoSample(BYTE* pBuffer, DWORD dwDataLen, REFERENCE_TIME t_start) { - parent->OnVideoSample(pBuffer, dwDataLen, t_start); - } - bool operator=(const VideoSampler &vs) { return parent == vs.parent; } - } sampler; - friend class VideoSampler; - - void OnVideoSample(BYTE* pBuffer, DWORD dwDataLen, REFERENCE_TIME t_start); - + class VideoSampler : public IVideoCallback + { + public: + CaptureDSCapture* parent; + VideoSampler(CaptureDSCapture* _parent) : parent(_parent) + { + } + void OnVideoSample(BYTE* pBuffer, DWORD dwDataLen, REFERENCE_TIME t_start) + { + parent->OnVideoSample(pBuffer, dwDataLen, t_start); + } + bool operator=(const VideoSampler& vs) + { + return parent == vs.parent; + } + } sampler; + friend class VideoSampler; + + void OnVideoSample(BYTE* pBuffer, DWORD dwDataLen, REFERENCE_TIME t_start); + public: - - /** - * \brief Constructor. - * - * \param captureDevice Information of which camera to create. - */ - CaptureDSCapture(const CaptureDevice captureDevice); - /** - * \brief Destructor. - */ - ~CaptureDSCapture(); - bool start(); - void stop(); - IplImage *captureImage(); - bool showSettingsDialog(); - std::string SerializeId(); - bool Serialize(Serialization *serialization); - + /** + * \brief Constructor. + * + * \param captureDevice Information of which camera to create. + */ + CaptureDSCapture(const CaptureDevice captureDevice); + /** + * \brief Destructor. + */ + ~CaptureDSCapture(); + bool start(); + void stop(); + IplImage* captureImage(); + bool showSettingsDialog(); + std::string SerializeId(); + bool Serialize(Serialization* serialization); private: - CDSCapture *m_pDSCapture; - int m_nBpp; - int m_nVideo_x_res; - int m_nVideo_y_res; - BYTE *imgBuffer; - BYTE *imgBufferForCallback; - IplImage *mReturnFrame; - CRITICAL_SECTION crit; - unsigned int buffer_size; - HANDLE next_event; + CDSCapture* m_pDSCapture; + int m_nBpp; + int m_nVideo_x_res; + int m_nVideo_y_res; + BYTE* imgBuffer; + BYTE* imgBufferForCallback; + IplImage* mReturnFrame; + CRITICAL_SECTION crit; + unsigned int buffer_size; + HANDLE next_event; }; /** @@ -117,26 +122,27 @@ class ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT CaptureDSCapture * the build by default. */ class ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT CapturePluginDSCapture - : public alvar::CapturePlugin + : public alvar::CapturePlugin { public: - /** - * \brief Constructor. - * - * \param captureType A unique identifier for the capture plugin. - */ - CapturePluginDSCapture(const std::string &captureType); - /** - * \brief Destructor. - */ - ~CapturePluginDSCapture(); - CaptureDeviceVector enumerateDevices(); - Capture *createCapture(const CaptureDevice captureDevice); + /** + * \brief Constructor. + * + * \param captureType A unique identifier for the capture plugin. + */ + CapturePluginDSCapture(const std::string& captureType); + /** + * \brief Destructor. 
+ */ + ~CapturePluginDSCapture(); + CaptureDeviceVector enumerateDevices(); + Capture* createCapture(const CaptureDevice captureDevice); }; -extern "C" ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT void registerPlugin(const std::string &captureType, alvar::CapturePlugin *&capturePlugin); +extern "C" ALVAR_CAPTURE_PLUGIN_DSCAPTURE_EXPORT void registerPlugin( + const std::string& captureType, alvar::CapturePlugin*& capturePlugin); -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar #endif diff --git a/ar_track_alvar/src/platform/capture_plugin_file/CapturePluginFile.cpp b/ar_track_alvar/src/platform/capture_plugin_file/CapturePluginFile.cpp index 94f55c1..0728f47 100644 --- a/ar_track_alvar/src/platform/capture_plugin_file/CapturePluginFile.cpp +++ b/ar_track_alvar/src/platform/capture_plugin_file/CapturePluginFile.cpp @@ -23,87 +23,92 @@ #include "CapturePluginFile.h" -namespace alvar { -namespace plugins { - +namespace alvar +{ +namespace plugins +{ CaptureFile::CaptureFile(const CaptureDevice captureDevice) - : Capture(captureDevice) - , mVideoCapture() - , mMatrix() - , mImage() + : Capture(captureDevice), mVideoCapture(), mMatrix(), mImage() { } CaptureFile::~CaptureFile() { - stop(); + stop(); } bool CaptureFile::start() { - if (isCapturing()) { - return isCapturing(); - } + if (isCapturing()) + { + return isCapturing(); + } - mVideoCapture.open(captureDevice().id().c_str()); - if (mVideoCapture.isOpened()) { - mXResolution = (int)mVideoCapture.get(CV_CAP_PROP_FRAME_WIDTH); - mYResolution = (int)mVideoCapture.get(CV_CAP_PROP_FRAME_HEIGHT); - mIsCapturing = true; - } + mVideoCapture.open(captureDevice().id().c_str()); + if (mVideoCapture.isOpened()) + { + mXResolution = (int)mVideoCapture.get(CV_CAP_PROP_FRAME_WIDTH); + mYResolution = (int)mVideoCapture.get(CV_CAP_PROP_FRAME_HEIGHT); + mIsCapturing = true; + } - return isCapturing(); + return isCapturing(); } void CaptureFile::stop() { - if (isCapturing()) { - mVideoCapture.release(); - mIsCapturing = false; - } + if (isCapturing()) + { + mVideoCapture.release(); + mIsCapturing = false; + } } -IplImage *CaptureFile::captureImage() +IplImage* CaptureFile::captureImage() { - if (!isCapturing()) { - return NULL; + if (!isCapturing()) + { + return NULL; + } + + if (!mVideoCapture.grab()) + { + // try to restart the capturing when end of file is reached + mVideoCapture.release(); + mVideoCapture.open(captureDevice().id().c_str()); + if (!mVideoCapture.isOpened()) + { + mIsCapturing = false; + return NULL; } - - if (!mVideoCapture.grab()) { - // try to restart the capturing when end of file is reached - mVideoCapture.release(); - mVideoCapture.open(captureDevice().id().c_str()); - if (!mVideoCapture.isOpened()) { - mIsCapturing = false; - return NULL; - } - if (!mVideoCapture.grab()) { - return NULL; - } + if (!mVideoCapture.grab()) + { + return NULL; } - mVideoCapture.retrieve(mMatrix); - mImage = mMatrix; - return &mImage; + } + mVideoCapture.retrieve(mMatrix); + mImage = mMatrix; + return &mImage; } bool CaptureFile::showSettingsDialog() { - // TODO: implement this method - return false; + // TODO: implement this method + return false; } std::string CaptureFile::SerializeId() { - return "CaptureFile"; + return "CaptureFile"; } -bool CaptureFile::Serialize(Serialization *serialization) +bool CaptureFile::Serialize(Serialization* serialization) { - return false; + return false; } -CapturePluginFile::CapturePluginFile(const std::string &captureType) - : CapturePlugin(captureType) 
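`CaptureFile::captureImage()` above loops a video file by reopening it when `grab()` hits end of stream. The same retry, reduced to its core (OpenCV 2.x-era API, matching the `CV_CAP_PROP_*` constants used in this file; the helper name is invented):

```
#include <string>
#include <opencv2/highgui/highgui.hpp>

// Grab the next frame, restarting the file once when it runs out,
// as CaptureFile::captureImage() does. Returns false only if the
// file cannot be reopened or replayed.
bool grabLooping(cv::VideoCapture& cap, const std::string& path, cv::Mat& out)
{
  if (!cap.grab())
  {
    cap.release();  // end of file: reopen to restart playback
    cap.open(path);
    if (!cap.isOpened() || !cap.grab())
      return false;
  }
  cap.retrieve(out);
  return true;
}
```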
+CapturePluginFile::CapturePluginFile(const std::string& captureType) + : CapturePlugin(captureType) { } @@ -113,19 +118,20 @@ CapturePluginFile::~CapturePluginFile() CapturePlugin::CaptureDeviceVector CapturePluginFile::enumerateDevices() { - CaptureDeviceVector devices; - return devices; + CaptureDeviceVector devices; + return devices; } -Capture *CapturePluginFile::createCapture(const CaptureDevice captureDevice) +Capture* CapturePluginFile::createCapture(const CaptureDevice captureDevice) { - return new CaptureFile(captureDevice); + return new CaptureFile(captureDevice); } -void registerPlugin(const std::string &captureType, alvar::CapturePlugin *&capturePlugin) +void registerPlugin(const std::string& captureType, + alvar::CapturePlugin*& capturePlugin) { - capturePlugin = new CapturePluginFile(captureType); + capturePlugin = new CapturePluginFile(captureType); } -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar diff --git a/ar_track_alvar/src/platform/capture_plugin_file/CapturePluginFile.h b/ar_track_alvar/src/platform/capture_plugin_file/CapturePluginFile.h index 237afff..8314312 100644 --- a/ar_track_alvar/src/platform/capture_plugin_file/CapturePluginFile.h +++ b/ar_track_alvar/src/platform/capture_plugin_file/CapturePluginFile.h @@ -31,13 +31,13 @@ */ #ifdef WIN32 - #ifdef ALVAR_Capture_Plugin_File_BUILD - #define ALVAR_CAPTURE_PLUGIN_FILE_EXPORT __declspec(dllexport) - #else - #define ALVAR_CAPTURE_PLUGIN_FILE_EXPORT __declspec(dllimport) - #endif +#ifdef ALVAR_Capture_Plugin_File_BUILD +#define ALVAR_CAPTURE_PLUGIN_FILE_EXPORT __declspec(dllexport) #else - #define ALVAR_CAPTURE_PLUGIN_FILE_EXPORT +#define ALVAR_CAPTURE_PLUGIN_FILE_EXPORT __declspec(dllimport) +#endif +#else +#define ALVAR_CAPTURE_PLUGIN_FILE_EXPORT #endif #include "Capture.h" @@ -45,66 +45,67 @@ #include "highgui.h" -namespace alvar { - +namespace alvar +{ /** * \brief Dynamically loaded plugins namespace. */ -namespace plugins { - +namespace plugins +{ /** * \brief Implementation of Capture interface for File plugin. */ -class ALVAR_CAPTURE_PLUGIN_FILE_EXPORT CaptureFile - : public alvar::Capture +class ALVAR_CAPTURE_PLUGIN_FILE_EXPORT CaptureFile : public alvar::Capture { public: - /** - * \brief Constructor. - * - * \param captureDevice Information of which camera to create. - */ - CaptureFile(const CaptureDevice captureDevice); - /** - * \brief Destructor. - */ - ~CaptureFile(); - bool start(); - void stop(); - IplImage *captureImage(); - bool showSettingsDialog(); - std::string SerializeId(); - bool Serialize(Serialization *serialization); + /** + * \brief Constructor. + * + * \param captureDevice Information of which camera to create. + */ + CaptureFile(const CaptureDevice captureDevice); + /** + * \brief Destructor. + */ + ~CaptureFile(); + bool start(); + void stop(); + IplImage* captureImage(); + bool showSettingsDialog(); + std::string SerializeId(); + bool Serialize(Serialization* serialization); + private: - cv::VideoCapture mVideoCapture; - cv::Mat mMatrix; - IplImage mImage; + cv::VideoCapture mVideoCapture; + cv::Mat mMatrix; + IplImage mImage; }; /** * \brief Implementation of CapturePlugin interface for File plugin. */ class ALVAR_CAPTURE_PLUGIN_FILE_EXPORT CapturePluginFile - : public alvar::CapturePlugin + : public alvar::CapturePlugin { public: - /** - * \brief Constructor. - * - * \param captureType A unique identifier for the capture plugin. - */ - CapturePluginFile(const std::string &captureType); - /** - * \brief Destructor. 
- */ - ~CapturePluginFile(); - CaptureDeviceVector enumerateDevices(); - Capture *createCapture(const CaptureDevice captureDevice); + /** + * \brief Constructor. + * + * \param captureType A unique identifier for the capture plugin. + */ + CapturePluginFile(const std::string& captureType); + /** + * \brief Destructor. + */ + ~CapturePluginFile(); + CaptureDeviceVector enumerateDevices(); + Capture* createCapture(const CaptureDevice captureDevice); }; -extern "C" ALVAR_CAPTURE_PLUGIN_FILE_EXPORT void registerPlugin(const std::string &captureType, alvar::CapturePlugin *&capturePlugin); +extern "C" ALVAR_CAPTURE_PLUGIN_FILE_EXPORT void registerPlugin( + const std::string& captureType, alvar::CapturePlugin*& capturePlugin); -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar #endif diff --git a/ar_track_alvar/src/platform/capture_plugin_highgui/CapturePluginHighgui.cpp b/ar_track_alvar/src/platform/capture_plugin_highgui/CapturePluginHighgui.cpp index e624569..1522b24 100644 --- a/ar_track_alvar/src/platform/capture_plugin_highgui/CapturePluginHighgui.cpp +++ b/ar_track_alvar/src/platform/capture_plugin_highgui/CapturePluginHighgui.cpp @@ -25,89 +25,94 @@ #include -namespace alvar { -namespace plugins { - +namespace alvar +{ +namespace plugins +{ CaptureHighgui::CaptureHighgui(const CaptureDevice captureDevice) - : Capture(captureDevice) - , mVideoCapture() - , mMatrix() - , mImage() + : Capture(captureDevice), mVideoCapture(), mMatrix(), mImage() { } CaptureHighgui::~CaptureHighgui() { - stop(); + stop(); } -void CaptureHighgui::setResolution(const unsigned long xResolution, const unsigned long yResolution) +void CaptureHighgui::setResolution(const unsigned long xResolution, + const unsigned long yResolution) { - if (mVideoCapture.isOpened()) { - mVideoCapture.set(CV_CAP_PROP_FRAME_WIDTH, xResolution); - mVideoCapture.set(CV_CAP_PROP_FRAME_HEIGHT, yResolution); - mXResolution = (int)mVideoCapture.get(CV_CAP_PROP_FRAME_WIDTH); - mYResolution = (int)mVideoCapture.get(CV_CAP_PROP_FRAME_HEIGHT); - } + if (mVideoCapture.isOpened()) + { + mVideoCapture.set(CV_CAP_PROP_FRAME_WIDTH, xResolution); + mVideoCapture.set(CV_CAP_PROP_FRAME_HEIGHT, yResolution); + mXResolution = (int)mVideoCapture.get(CV_CAP_PROP_FRAME_WIDTH); + mYResolution = (int)mVideoCapture.get(CV_CAP_PROP_FRAME_HEIGHT); + } } bool CaptureHighgui::start() { - if (isCapturing()) { - return isCapturing(); - } + if (isCapturing()) + { + return isCapturing(); + } - std::istringstream convert(captureDevice().id()); - int id; - convert >> id; + std::istringstream convert(captureDevice().id()); + int id; + convert >> id; - mVideoCapture.open(id); - if (mVideoCapture.isOpened()) { - mIsCapturing = true; - } + mVideoCapture.open(id); + if (mVideoCapture.isOpened()) + { + mIsCapturing = true; + } - return isCapturing(); + return isCapturing(); } void CaptureHighgui::stop() { - if (isCapturing()) { - mVideoCapture.release(); - mIsCapturing = false; - } + if (isCapturing()) + { + mVideoCapture.release(); + mIsCapturing = false; + } } -IplImage *CaptureHighgui::captureImage() +IplImage* CaptureHighgui::captureImage() { - if (!isCapturing()) { - return NULL; - } - if (!mVideoCapture.grab()) { - return NULL; - } - mVideoCapture.retrieve(mMatrix); - mImage = mMatrix; - return &mImage; + if (!isCapturing()) + { + return NULL; + } + if (!mVideoCapture.grab()) + { + return NULL; + } + mVideoCapture.retrieve(mMatrix); + mImage = mMatrix; + return &mImage; } bool CaptureHighgui::showSettingsDialog() { - // TODO: 
implement this method - return false; + // TODO: implement this method + return false; } std::string CaptureHighgui::SerializeId() { - return "CaptureHighgui"; + return "CaptureHighgui"; } -bool CaptureHighgui::Serialize(Serialization *serialization) +bool CaptureHighgui::Serialize(Serialization* serialization) { - return false; + return false; } -CapturePluginHighgui::CapturePluginHighgui(const std::string &captureType) - : CapturePlugin(captureType) +CapturePluginHighgui::CapturePluginHighgui(const std::string& captureType) + : CapturePlugin(captureType) { } @@ -117,48 +122,54 @@ CapturePluginHighgui::~CapturePluginHighgui() CapturePlugin::CaptureDeviceVector CapturePluginHighgui::enumerateDevices() { - CaptureDeviceVector devices; - - bool loop = true; - int id = 0; - cv::VideoCapture videoCapture; - - while (loop) { - std::stringstream convert; - convert << id; - CaptureDevice captureDevice(mCaptureType, convert.str()); - - videoCapture.open(id); - if (videoCapture.isOpened()) { - int width = (int)videoCapture.get(CV_CAP_PROP_FRAME_WIDTH); - int height = (int)videoCapture.get(CV_CAP_PROP_FRAME_HEIGHT); - if (width > 0 && height > 0) { - devices.push_back(captureDevice); - } - else { - loop = false; - } - } - else { - loop = false; - } - - id++; + CaptureDeviceVector devices; + + bool loop = true; + int id = 0; + cv::VideoCapture videoCapture; + + while (loop) + { + std::stringstream convert; + convert << id; + CaptureDevice captureDevice(mCaptureType, convert.str()); + + videoCapture.open(id); + if (videoCapture.isOpened()) + { + int width = (int)videoCapture.get(CV_CAP_PROP_FRAME_WIDTH); + int height = (int)videoCapture.get(CV_CAP_PROP_FRAME_HEIGHT); + if (width > 0 && height > 0) + { + devices.push_back(captureDevice); + } + else + { + loop = false; + } + } + else + { + loop = false; } - videoCapture.release(); - return devices; + id++; + } + videoCapture.release(); + + return devices; } -Capture *CapturePluginHighgui::createCapture(const CaptureDevice captureDevice) +Capture* CapturePluginHighgui::createCapture(const CaptureDevice captureDevice) { - return new CaptureHighgui(captureDevice); + return new CaptureHighgui(captureDevice); } -void registerPlugin(const std::string &captureType, alvar::CapturePlugin *&capturePlugin) +void registerPlugin(const std::string& captureType, + alvar::CapturePlugin*& capturePlugin) { - capturePlugin = new CapturePluginHighgui(captureType); + capturePlugin = new CapturePluginHighgui(captureType); } -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar diff --git a/ar_track_alvar/src/platform/capture_plugin_highgui/CapturePluginHighgui.h b/ar_track_alvar/src/platform/capture_plugin_highgui/CapturePluginHighgui.h index b215036..721204d 100644 --- a/ar_track_alvar/src/platform/capture_plugin_highgui/CapturePluginHighgui.h +++ b/ar_track_alvar/src/platform/capture_plugin_highgui/CapturePluginHighgui.h @@ -31,13 +31,13 @@ */ #ifdef WIN32 - #ifdef ALVAR_Capture_Plugin_Highgui_BUILD - #define ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT __declspec(dllexport) - #else - #define ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT __declspec(dllimport) - #endif +#ifdef ALVAR_Capture_Plugin_Highgui_BUILD +#define ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT __declspec(dllexport) #else - #define ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT +#define ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT __declspec(dllimport) +#endif +#else +#define ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT #endif #include "Capture.h" @@ -45,67 +45,69 @@ #include "highgui.h" -namespace alvar { - +namespace 
alvar +{ /** * \brief Dynamically loaded plugins namespace. */ -namespace plugins { - +namespace plugins +{ /** * \brief Implementation of Capture interface for Highgui plugin. */ -class ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT CaptureHighgui - : public alvar::Capture +class ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT CaptureHighgui : public alvar::Capture { public: - /** - * \brief Constructor. - * - * \param captureDevice Information of which camera to create. - */ - CaptureHighgui(const CaptureDevice captureDevice); - /** - * \brief Destructor. - */ - ~CaptureHighgui(); - void setResolution(const unsigned long xResolution, const unsigned long yResolution); - bool start(); - void stop(); - IplImage *captureImage(); - bool showSettingsDialog(); - std::string SerializeId(); - bool Serialize(Serialization *serialization); + /** + * \brief Constructor. + * + * \param captureDevice Information of which camera to create. + */ + CaptureHighgui(const CaptureDevice captureDevice); + /** + * \brief Destructor. + */ + ~CaptureHighgui(); + void setResolution(const unsigned long xResolution, + const unsigned long yResolution); + bool start(); + void stop(); + IplImage* captureImage(); + bool showSettingsDialog(); + std::string SerializeId(); + bool Serialize(Serialization* serialization); + private: - cv::VideoCapture mVideoCapture; - cv::Mat mMatrix; - IplImage mImage; + cv::VideoCapture mVideoCapture; + cv::Mat mMatrix; + IplImage mImage; }; /** * \brief Implementation of CapturePlugin interface for Highgui plugin. */ class ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT CapturePluginHighgui - : public alvar::CapturePlugin + : public alvar::CapturePlugin { public: - /** - * \brief Constructor. - * - * \param captureType A unique identifier for the capture plugin. - */ - CapturePluginHighgui(const std::string &captureType); - /** - * \brief Destructor. - */ - ~CapturePluginHighgui(); - CaptureDeviceVector enumerateDevices(); - Capture *createCapture(const CaptureDevice captureDevice); + /** + * \brief Constructor. + * + * \param captureType A unique identifier for the capture plugin. + */ + CapturePluginHighgui(const std::string& captureType); + /** + * \brief Destructor. 
+ */ + ~CapturePluginHighgui(); + CaptureDeviceVector enumerateDevices(); + Capture* createCapture(const CaptureDevice captureDevice); }; -extern "C" ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT void registerPlugin(const std::string &captureType, alvar::CapturePlugin *&capturePlugin); +extern "C" ALVAR_CAPTURE_PLUGIN_HIGHGUI_EXPORT void registerPlugin( + const std::string& captureType, alvar::CapturePlugin*& capturePlugin); -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar #endif diff --git a/ar_track_alvar/src/platform/capture_plugin_ptgrey/CapturePluginPtgrey.cpp b/ar_track_alvar/src/platform/capture_plugin_ptgrey/CapturePluginPtgrey.cpp index 606515a..8a67ee0 100644 --- a/ar_track_alvar/src/platform/capture_plugin_ptgrey/CapturePluginPtgrey.cpp +++ b/ar_track_alvar/src/platform/capture_plugin_ptgrey/CapturePluginPtgrey.cpp @@ -31,148 +31,172 @@ using namespace std; -namespace alvar { -namespace plugins { - +namespace alvar +{ +namespace plugins +{ CapturePtgrey::CapturePtgrey(const CaptureDevice captureDevice) - : Capture(captureDevice) - , mCamera(new FlyCapture2::Camera) - , mImage(new FlyCapture2::Image) - , mChannels(-1) - , mReturnFrame(NULL) + : Capture(captureDevice) + , mCamera(new FlyCapture2::Camera) + , mImage(new FlyCapture2::Image) + , mChannels(-1) + , mReturnFrame(NULL) { } CapturePtgrey::~CapturePtgrey() { - stop(); - delete mCamera; - delete mImage; + stop(); + delete mCamera; + delete mImage; } bool CapturePtgrey::start() { - if (isCapturing()) { - return isCapturing(); - } - - stringstream id(captureDevice().id()); - id.setf(ios_base::hex, ios_base::basefield); - id >> mGUID.value[0]; id.get(); - id >> mGUID.value[1]; id.get(); - id >> mGUID.value[2]; id.get(); - id >> mGUID.value[3]; - - if (mCamera->Connect(&mGUID) != FlyCapture2::PGRERROR_OK) { - return false; - } - - FlyCapture2::VideoMode videoMode; - FlyCapture2::FrameRate frameRate; - if (mCamera->GetVideoModeAndFrameRate (&videoMode, &frameRate) != FlyCapture2::PGRERROR_OK) { - return false; - } - - if (videoMode == FlyCapture2::VIDEOMODE_640x480RGB) { - mChannels = 3; - mXResolution = 640; - mYResolution = 480; - - } else if (videoMode == FlyCapture2::VIDEOMODE_640x480Y8) { - mChannels = 1; - mXResolution = 640; - mYResolution = 480; - - } else if (videoMode == FlyCapture2::VIDEOMODE_800x600RGB) { - mChannels = 3; - mXResolution = 800; - mYResolution = 600; - - } else if (videoMode == FlyCapture2::VIDEOMODE_800x600Y8) { - mChannels = 1; - mXResolution = 800; - mYResolution = 600; - - } else if (videoMode == FlyCapture2::VIDEOMODE_1024x768RGB) { - mChannels = 3; - mXResolution = 1024; - mYResolution = 768; - - } else if (videoMode == FlyCapture2::VIDEOMODE_1024x768Y8) { - mChannels = 1; - mXResolution = 1024; - mYResolution = 768; - - } else if (videoMode == FlyCapture2::VIDEOMODE_1280x960RGB) { - mChannels = 3; - mXResolution = 1280; - mYResolution = 960; - - } else if (videoMode == FlyCapture2::VIDEOMODE_1280x960Y8) { - mChannels = 1; - mXResolution = 1280; - mYResolution = 960; - - } else if (videoMode == FlyCapture2::VIDEOMODE_1600x1200RGB) { - mChannels = 3; - mXResolution = 1600; - mYResolution = 1200; - - } else if (videoMode == FlyCapture2::VIDEOMODE_1600x1200Y8) { - mChannels = 1; - mXResolution = 1600; - mYResolution = 1200; - - } else { - return false; - } - - mReturnFrame = cvCreateImage(cvSize(mXResolution, mYResolution), IPL_DEPTH_8U, mChannels); - if (mCamera->StartCapture() != FlyCapture2::PGRERROR_OK) { - return false; - } - mIsCapturing = true; + if 
(isCapturing()) + { return isCapturing(); + } + + stringstream id(captureDevice().id()); + id.setf(ios_base::hex, ios_base::basefield); + id >> mGUID.value[0]; + id.get(); + id >> mGUID.value[1]; + id.get(); + id >> mGUID.value[2]; + id.get(); + id >> mGUID.value[3]; + + if (mCamera->Connect(&mGUID) != FlyCapture2::PGRERROR_OK) + { + return false; + } + + FlyCapture2::VideoMode videoMode; + FlyCapture2::FrameRate frameRate; + if (mCamera->GetVideoModeAndFrameRate(&videoMode, &frameRate) != + FlyCapture2::PGRERROR_OK) + { + return false; + } + + if (videoMode == FlyCapture2::VIDEOMODE_640x480RGB) + { + mChannels = 3; + mXResolution = 640; + mYResolution = 480; + } + else if (videoMode == FlyCapture2::VIDEOMODE_640x480Y8) + { + mChannels = 1; + mXResolution = 640; + mYResolution = 480; + } + else if (videoMode == FlyCapture2::VIDEOMODE_800x600RGB) + { + mChannels = 3; + mXResolution = 800; + mYResolution = 600; + } + else if (videoMode == FlyCapture2::VIDEOMODE_800x600Y8) + { + mChannels = 1; + mXResolution = 800; + mYResolution = 600; + } + else if (videoMode == FlyCapture2::VIDEOMODE_1024x768RGB) + { + mChannels = 3; + mXResolution = 1024; + mYResolution = 768; + } + else if (videoMode == FlyCapture2::VIDEOMODE_1024x768Y8) + { + mChannels = 1; + mXResolution = 1024; + mYResolution = 768; + } + else if (videoMode == FlyCapture2::VIDEOMODE_1280x960RGB) + { + mChannels = 3; + mXResolution = 1280; + mYResolution = 960; + } + else if (videoMode == FlyCapture2::VIDEOMODE_1280x960Y8) + { + mChannels = 1; + mXResolution = 1280; + mYResolution = 960; + } + else if (videoMode == FlyCapture2::VIDEOMODE_1600x1200RGB) + { + mChannels = 3; + mXResolution = 1600; + mYResolution = 1200; + } + else if (videoMode == FlyCapture2::VIDEOMODE_1600x1200Y8) + { + mChannels = 1; + mXResolution = 1600; + mYResolution = 1200; + } + else + { + return false; + } + + mReturnFrame = cvCreateImage(cvSize(mXResolution, mYResolution), IPL_DEPTH_8U, + mChannels); + if (mCamera->StartCapture() != FlyCapture2::PGRERROR_OK) + { + return false; + } + mIsCapturing = true; + return isCapturing(); } void CapturePtgrey::stop() { - if (isCapturing()) { - mCamera->StopCapture(); - cvReleaseImage(&mReturnFrame); - } + if (isCapturing()) + { + mCamera->StopCapture(); + cvReleaseImage(&mReturnFrame); + } } -IplImage *CapturePtgrey::captureImage() +IplImage* CapturePtgrey::captureImage() { - if (!isCapturing()) { - return NULL; - } - - if (mCamera->RetrieveBuffer(mImage) == FlyCapture2::PGRERROR_OK) { - unsigned long length = mReturnFrame->widthStep * mYResolution; - memcpy(mReturnFrame->imageData, mImage->GetData(), length); - } - return mReturnFrame; + if (!isCapturing()) + { + return NULL; + } + + if (mCamera->RetrieveBuffer(mImage) == FlyCapture2::PGRERROR_OK) + { + unsigned long length = mReturnFrame->widthStep * mYResolution; + memcpy(mReturnFrame->imageData, mImage->GetData(), length); + } + return mReturnFrame; } bool CapturePtgrey::showSettingsDialog() { - return false; + return false; } string CapturePtgrey::SerializeId() { - return "CapturePtgrey"; + return "CapturePtgrey"; } -bool CapturePtgrey::Serialize(Serialization *serialization) +bool CapturePtgrey::Serialize(Serialization* serialization) { - return false; + return false; } -CapturePluginPtgrey::CapturePluginPtgrey(const string &captureType) - : CapturePlugin(captureType) +CapturePluginPtgrey::CapturePluginPtgrey(const string& captureType) + : CapturePlugin(captureType) { } @@ -182,47 +206,52 @@ CapturePluginPtgrey::~CapturePluginPtgrey() 
CapturePlugin::CaptureDeviceVector CapturePluginPtgrey::enumerateDevices() { - CaptureDeviceVector devices; - - FlyCapture2::BusManager bus; - if (bus.RescanBus() != FlyCapture2::PGRERROR_OK) { - return devices; - } - - unsigned int numberCameras = 0; - bus.GetNumOfCameras(&numberCameras); - - for (unsigned int i = 0; i < numberCameras; i++) { - FlyCapture2::PGRGuid guid; - bus.GetCameraFromIndex(i, &guid); - stringstream convert; - convert << hex << guid.value[0]; - convert << "_" << hex << guid.value[1]; - convert << "_" << hex << guid.value[2]; - convert << "_" << hex << guid.value[3]; - stringstream description; - FlyCapture2::Camera camera; - if (camera.Connect(&guid) != FlyCapture2::PGRERROR_OK) continue; - FlyCapture2::CameraInfo info; - if (camera.GetCameraInfo (&info) != FlyCapture2::PGRERROR_OK) continue; - description << info.vendorName << " "; - description << info.modelName; - CaptureDevice captureDevice(mCaptureType, convert.str(), description.str()); - devices.push_back(captureDevice); - } + CaptureDeviceVector devices; + FlyCapture2::BusManager bus; + if (bus.RescanBus() != FlyCapture2::PGRERROR_OK) + { return devices; + } + + unsigned int numberCameras = 0; + bus.GetNumOfCameras(&numberCameras); + + for (unsigned int i = 0; i < numberCameras; i++) + { + FlyCapture2::PGRGuid guid; + bus.GetCameraFromIndex(i, &guid); + stringstream convert; + convert << hex << guid.value[0]; + convert << "_" << hex << guid.value[1]; + convert << "_" << hex << guid.value[2]; + convert << "_" << hex << guid.value[3]; + stringstream description; + FlyCapture2::Camera camera; + if (camera.Connect(&guid) != FlyCapture2::PGRERROR_OK) + continue; + FlyCapture2::CameraInfo info; + if (camera.GetCameraInfo(&info) != FlyCapture2::PGRERROR_OK) + continue; + description << info.vendorName << " "; + description << info.modelName; + CaptureDevice captureDevice(mCaptureType, convert.str(), description.str()); + devices.push_back(captureDevice); + } + + return devices; } -Capture *CapturePluginPtgrey::createCapture(const CaptureDevice captureDevice) +Capture* CapturePluginPtgrey::createCapture(const CaptureDevice captureDevice) { - return new CapturePtgrey(captureDevice); + return new CapturePtgrey(captureDevice); } -void registerPlugin(const string &captureType, alvar::CapturePlugin *&capturePlugin) +void registerPlugin(const string& captureType, + alvar::CapturePlugin*& capturePlugin) { - capturePlugin = new CapturePluginPtgrey(captureType); + capturePlugin = new CapturePluginPtgrey(captureType); } -} // namespace plugins -} // namespace alvar +} // namespace plugins +} // namespace alvar diff --git a/ar_track_alvar/src/platform/capture_plugin_ptgrey/CapturePluginPtgrey.h b/ar_track_alvar/src/platform/capture_plugin_ptgrey/CapturePluginPtgrey.h index 17a583a..347a34f 100644 --- a/ar_track_alvar/src/platform/capture_plugin_ptgrey/CapturePluginPtgrey.h +++ b/ar_track_alvar/src/platform/capture_plugin_ptgrey/CapturePluginPtgrey.h @@ -34,13 +34,13 @@ */ #ifdef WIN32 - #ifdef ALVAR_Capture_Plugin_Ptgrey_BUILD - #define ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT __declspec(dllexport) - #else - #define ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT __declspec(dllimport) - #endif +#ifdef ALVAR_Capture_Plugin_Ptgrey_BUILD +#define ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT __declspec(dllexport) #else - #define ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT +#define ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT __declspec(dllimport) +#endif +#else +#define ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT #endif #include "Capture.h" @@ -49,50 +49,51 @@ #include 
"FlyCapture2Defs.h" // Forward declaration for PTGrey specific classes. -namespace FlyCapture2 { - class Camera; - class Image; -} +namespace FlyCapture2 +{ +class Camera; +class Image; +} // namespace FlyCapture2 -namespace alvar { - +namespace alvar +{ /** * \brief Dynamically loaded plugins namespace. */ -namespace plugins { - +namespace plugins +{ /** * \brief Implementation of Capture interface for Ptgrey plugin. * * \note The PointGrey plugin is currently experimental and not included in * the build by default. */ -class ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT CapturePtgrey - : public alvar::Capture +class ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT CapturePtgrey : public alvar::Capture { public: - /** - * \brief Constructor. - * - * \param captureDevice Information of which camera to create. - */ - CapturePtgrey(const CaptureDevice captureDevice); - /** - * \brief Destructor. - */ - ~CapturePtgrey(); - bool start(); - void stop(); - IplImage *captureImage(); - bool showSettingsDialog(); - std::string SerializeId(); - bool Serialize(Serialization *serialization); + /** + * \brief Constructor. + * + * \param captureDevice Information of which camera to create. + */ + CapturePtgrey(const CaptureDevice captureDevice); + /** + * \brief Destructor. + */ + ~CapturePtgrey(); + bool start(); + void stop(); + IplImage* captureImage(); + bool showSettingsDialog(); + std::string SerializeId(); + bool Serialize(Serialization* serialization); + private: - FlyCapture2::Camera *mCamera; - FlyCapture2::Image *mImage; - FlyCapture2::PGRGuid mGUID; - int mChannels; - IplImage *mReturnFrame; + FlyCapture2::Camera* mCamera; + FlyCapture2::Image* mImage; + FlyCapture2::PGRGuid mGUID; + int mChannels; + IplImage* mReturnFrame; }; /** @@ -102,26 +103,27 @@ class ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT CapturePtgrey * the build by default. */ class ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT CapturePluginPtgrey - : public alvar::CapturePlugin + : public alvar::CapturePlugin { public: - /** - * \brief Constructor. - * - * \param captureType A unique identifier for the capture plugin. - */ - CapturePluginPtgrey(const std::string &captureType); - /** - * \brief Destructor. - */ - ~CapturePluginPtgrey(); - CaptureDeviceVector enumerateDevices(); - Capture *createCapture(const CaptureDevice captureDevice); + /** + * \brief Constructor. + * + * \param captureType A unique identifier for the capture plugin. + */ + CapturePluginPtgrey(const std::string& captureType); + /** + * \brief Destructor. 
+   */
+  ~CapturePluginPtgrey();
+  CaptureDeviceVector enumerateDevices();
+  Capture* createCapture(const CaptureDevice captureDevice);
 };
 
-extern "C" ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT void registerPlugin(const std::string &captureType, alvar::CapturePlugin *&capturePlugin);
+extern "C" ALVAR_CAPTURE_PLUGIN_PTGREY_EXPORT void registerPlugin(
+    const std::string& captureType, alvar::CapturePlugin*& capturePlugin);
 
-} // namespace plugins
-} // namespace alvar
+}  // namespace plugins
+}  // namespace alvar
 
 #endif
diff --git a/ar_track_alvar/test/marker_abstract.launch.xml b/ar_track_alvar/test/marker_abstract.launch.xml
deleted file mode 100644
index 63fdc9d..0000000
--- a/ar_track_alvar/test/marker_abstract.launch.xml
+++ /dev/null
@@ -1,33 +0,0 @@
[33 deleted lines of launch XML; the markup was lost in extraction]
diff --git a/ar_track_alvar/test/marker_arg_config-basic.test b/ar_track_alvar/test/marker_arg_config-basic.test
deleted file mode 100644
index a98553e..0000000
--- a/ar_track_alvar/test/marker_arg_config-basic.test
+++ /dev/null
@@ -1,9 +0,0 @@
[9 deleted lines of rostest XML; the markup was lost in extraction]
diff --git a/ar_track_alvar/test/marker_arg_config-full.test b/ar_track_alvar/test/marker_arg_config-full.test
deleted file mode 100644
index c3b9602..0000000
--- a/ar_track_alvar/test/marker_arg_config-full.test
+++ /dev/null
@@ -1,7 +0,0 @@
[7 deleted lines of rostest XML; the markup was lost in extraction]
diff --git a/ar_track_alvar/test/marker_param_config-basic.test b/ar_track_alvar/test/marker_param_config-basic.test
deleted file mode 100644
index 6da4cd3..0000000
--- a/ar_track_alvar/test/marker_param_config-basic.test
+++ /dev/null
@@ -1,8 +0,0 @@
[8 deleted lines of rostest XML; the markup was lost in extraction]
diff --git a/ar_track_alvar/test/marker_param_config-full.test b/ar_track_alvar/test/marker_param_config-full.test
deleted file mode 100644
index 937a103..0000000
--- a/ar_track_alvar/test/marker_param_config-full.test
+++ /dev/null
@@ -1,7 +0,0 @@
[7 deleted lines of rostest XML; the markup was lost in extraction]
diff --git a/ar_track_alvar/test/points b/ar_track_alvar/test/points
deleted file mode 100644
index 3a097a0..0000000
--- a/ar_track_alvar/test/points
+++ /dev/null
@@ -1,81 +0,0 @@
-0.033776 0.107919 0.865000
-0.035506 0.113123 0.867000
-0.037157 0.118077 0.867000
-0.038719 0.122748 0.865000
-0.040367 0.127690 0.865000
-0.028900 0.109820 0.867000
-0.030481 0.114510 0.865000
-0.032129 0.119452 0.865000
-0.033776 0.124395 0.865000
-0.035424 0.129338 0.865000
-0.025597 0.111471 0.867000
-0.027186 0.116157 0.865000
-0.028833 0.121100 0.865000
-0.030481 0.126043 0.865000
-0.032129 0.129338 0.865000
-0.020643 0.113123 0.867000
-0.022294 0.118077 0.867000
-0.023890 0.122748 0.865000
-0.025538 0.127690 0.865000
-0.027091 0.130531 0.862000
-0.015652 0.114510 0.865000
-0.017300 0.119452 0.865000
-0.018882 0.123964 0.862000
-0.020595 0.127690 0.865000
-0.022166 0.132173 0.862000
-0.007414 0.117805 0.865000
-0.043763 0.104866 0.867000
-0.012386 0.104866 0.867000
-0.025450 0.142025 0.862000
-0.009062 0.122748 0.865000
-0.045414 0.109820 0.867000
-0.017380 0.103452 0.869000
-0.030375 0.140383 0.862000
-0.010710 0.126043 0.865000
-0.047066 0.114774 0.867000
-0.022346 0.103452 0.869000
-0.035301 0.138741 0.862000
-0.012314 0.130531 0.862000
-0.048605 0.119452 0.865000
-0.025656 0.101797 0.869000
-0.038719 0.137576 0.865000
-0.013956 0.135457 0.862000
-0.050252 0.124395 0.865000
-0.030622 0.100142 0.869000
-0.043662 0.135929 0.865000
-0.004119 0.107919 0.865000
-0.040553 0.096831 0.869000
-0.053362 0.132173 0.862000
-0.017200 0.144971 0.860000
-0.005767 0.112862 0.865000
-0.042111 0.101563 0.867000
-0.051720 0.128890 0.862000
-0.015562 0.140057 0.860000
-0.010710
0.116157 0.865000 -0.038719 0.106271 0.865000 -0.014005 0.109567 0.865000 -0.023808 0.137099 0.862000 -0.012357 0.121100 0.865000 -0.040460 0.111471 0.867000 -0.018991 0.108169 0.867000 -0.028833 0.135929 0.865000 -0.014005 0.126043 0.865000 -0.042111 0.116426 0.867000 -0.024001 0.106763 0.869000 -0.033659 0.133815 0.862000 -0.015598 0.128890 0.862000 -0.043662 0.121100 0.865000 -0.027249 0.104866 0.867000 -0.036943 0.132173 0.862000 -0.017240 0.133815 0.862000 -0.045310 0.126043 0.865000 -0.032203 0.103214 0.867000 -0.042014 0.130986 0.865000 -0.009083 0.106517 0.867000 -0.035588 0.098487 0.869000 -0.048436 0.133815 0.862000 -0.020476 0.143333 0.860000 -0.009083 0.111471 0.867000 -0.037071 0.102976 0.865000 -0.046957 0.130986 0.865000 -0.018882 0.138741 0.862000 \ No newline at end of file diff --git a/ar_track_alvar/test/resources/alvar-marker-pose-test.bag b/ar_track_alvar/test/resources/alvar-marker-pose-test.bag new file mode 100644 index 0000000..2adfb44 Binary files /dev/null and b/ar_track_alvar/test/resources/alvar-marker-pose-test.bag differ diff --git a/ar_track_alvar/test/test_ar.py b/ar_track_alvar/test/test_ar.py deleted file mode 100755 index 287ba86..0000000 --- a/ar_track_alvar/test/test_ar.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Software License Agreement (BSD License) -# -# Copyright (c) 2017, TORK (Tokyo Opensource Robotics Kyokai Association) -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Tokyo Opensource Robotics Kyokai Association -# nor the names of its contributors may be used to endorse or promote -# products derived from this software without specific prior written -# permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
- -import numpy -import math -import unittest - -from geometry_msgs.msg import Pose -import rospy -import tf -from tf.transformations import quaternion_from_euler - -class TestArAlvarRos(unittest.TestCase): - ''' - Tests are based on http://download.ros.org/data/ar_track_alvar/ar_track_alvar_4markers_tork_2017-01-30-15-03-19.bag - ''' - - def setUp(self): - rospy.init_node('test_armarker_ros_detect') - self.tflistener = tf.TransformListener() - - def tearDown(self): - True # TODO impl something meaningful - - def _lookup_tf(self, origin, target): - ''' - DEPRECATED: This does not return meaningful values for some reason. - @param origin: lookupTransform's 1st argument. - @param target: lookupTransform's 2nd argument. - @rtype: ([str], [str]) - @return: Translation and rotation. - ''' - while not rospy.is_shutdown(): - try: - (trans,rot) = self.tflistener.lookupTransform(origin, target, rospy.Time(0)) - break - except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e: - rospy.logerr(str(e) + ' target={}'.format(target)) - continue - return (trans, rot) - - def test_markers(self): - ''' - Aiming the real output taken from a bag file. - ''' - # The values in this list are ordered in the marker's number. - tf_expected = [[[0.04464459977845401, 0.05341412598745035, 0.26796900627543024], [0.6726999185589797, 0.7391474391806558, -0.01739267319701629, -0.028868280906256056]], - [[-0.04805772245624329, 0.039528315926071665, 0.26775882622136327], [0.48207151562664247, 0.8758763282975102, -0.016363763970395625, -0.013414118615296202]], - [[0.007233278235745441, 0.015615692018491452, 0.26619586686955365], [0.08546919545682985, 0.9959809257461487, 0.00424040439, -0.02677659572186436]], - [[0.06223014382428272, 0.014613815037010106, 0.26226145707174475], [-0.46400320825216246, 0.8850390875261293, 0.032644264656690035, -0.018471282241381157]]] - for i in range (0, len(tf_expected)): - while not rospy.is_shutdown(): - try: - target_frame = '/ar_marker_{}'.format(i) - (trans, rot) = self.tflistener.lookupTransform('/camera', target_frame, rospy.Time(0)) - break - except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e: - rospy.logerr(str(e) + ' target_frame={}'.format(target_frame)) - continue - # Compare each translation element (x, y, z) - for v_ret, v_expected in zip(trans, tf_expected[i][0]): - # Given that sigfig ignores the leading zero, we only compare the first sigfig. 
-                numpy.testing.assert_approx_equal(
-                    v_ret, v_expected, significant=1)
-            # Compare each orientation element (x, y, z, w)
-            for v_ret, v_expected in zip(rot, tf_expected[i][1]):
-                numpy.testing.assert_approx_equal(
-                    v_ret, v_expected, significant=1)
-
-if __name__ == '__main__':
-    import rostest
-    rostest.rosrun('ar_track_alvar', 'test_ar_alvar_ros', TestArAlvarRos)
diff --git a/ar_track_alvar/test/test_ar_legacy.py b/ar_track_alvar/test/test_ar_legacy.py
new file mode 100644
index 0000000..c4b5262
--- /dev/null
+++ b/ar_track_alvar/test/test_ar_legacy.py
@@ -0,0 +1,118 @@
+from rclpy.duration import Duration
+
+import os
+import sys
+import time
+import unittest
+import numpy as np
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+from launch_ros.actions import Node
+import launch_testing
+import launch
+
+import pytest
+import rclpy
+
+from stereo_msgs.msg import DisparityImage
+from sensor_msgs.msg import Image
+from ar_track_alvar_msgs.msg import AlvarMarkers
+from tf2_ros import TransformBroadcaster, TransformListener, TransformStamped, Buffer, LookupException, ConnectivityException, ExtrapolationException
+
+# Test Parameters
+bag_name = os.path.join(os.path.dirname(__file__),'resources','alvar-marker-pose-test.bag')
+@pytest.mark.rostest
+def generate_test_description():
+
+    return LaunchDescription([
+        # Launch bag with four markers
+        launch.actions.ExecuteProcess(
+            cmd=['ros2', 'bag', 'play', '--loop','-s',"rosbag_v2",bag_name],
+            output='screen'
+        ),
+
+        # Individual Marker Detector
+        Node(
+            package='ar_track_alvar',
+            executable='individualMarkersNoKinect',
+            name='individual_markers',
+            remappings=[
+                ("camera_image", "camera/image_raw"),
+                ("camera_info", "camera/camera_info")
+            ],
+            parameters=[
+                {"marker_size":2.3},
+                {"max_new_marker_error":0.08},
+                {"max_track_error":0.2},
+                {"output_frame":"camera"},
+                {"max_frequency":100.0},
+                {"marker_margin":2},
+                {"marker_resolution":5}
+            ],
+            output='screen'
+        ),
+        launch_testing.actions.ReadyToTest(),
+    ])
+
+class TestArTrack(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        rclpy.init()
+        cls.tfBuffer = Buffer()
+        cls.node = rclpy.create_node('test_ar_track_alvar_markers')
+        cls.tflistener = TransformListener(cls.tfBuffer, cls.node)
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.node.destroy_node()
+        rclpy.shutdown()
+
+    def test_marker_pose_estimation(self):
+        transforms = {}
+        start_time = time.time()
+        while rclpy.ok() and (time.time() - start_time) < 120:
+            rclpy.spin_once(self.node, timeout_sec=0.1)
+
+            dur = Duration(seconds=40)  # intended lookup timeout; currently unused
+
+            # The values in this list are ordered by marker number.
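+            # Each entry is [[x, y, z] translation in meters,
+            # [x, y, z, w] rotation quaternion] of marker i in the 'camera' frame.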
+            tf_expected = [[[0.04464459977845401, 0.05341412598745035, 0.26796900627543024], [0.6726999185589797, 0.7391474391806558, -0.01739267319701629, -0.028868280906256056]],
+                [[-0.04805772245624329, 0.039528315926071665, 0.26775882622136327], [0.48207151562664247, 0.8758763282975102, -0.016363763970395625, -0.013414118615296202]],
+                [[0.007233278235745441, 0.015615692018491452, 0.26619586686955365], [0.08546919545682985, 0.9959809257461487, 0.00424040439, -0.02677659572186436]],
+                [[0.06223014382428272, 0.014613815037010106, 0.26226145707174475], [-0.46400320825216246, 0.8850390875261293, 0.032644264656690035, -0.018471282241381157]]]
+
+            for i in range(0, len(tf_expected)):
+                try:
+                    target_frame = 'ar_marker_{}'.format(i)
+                    time_stamp = rclpy.time.Time(seconds=0, nanoseconds=0).to_msg()
+                    obj = self.tfBuffer.lookup_transform('camera', target_frame, time=time_stamp)
+                    trans = obj.transform.translation
+                    rot = obj.transform.rotation
+
+                    item = [[trans.x, trans.y, trans.z], [rot.x, rot.y, rot.z, rot.w]]
+                    transforms[i] = item
+
+                except (LookupException, ConnectivityException, ExtrapolationException) as e:
+                    continue
+
+            # Break once we have received all four markers
+            if len(transforms) == 4:
+                break
+
+        # There are four markers in the test bag.
+        # Make sure the deviation is less than 0.16 for translation and 0.1 for rotation.
+        if len(transforms) >= 4:
+            for i in range(4):
+                trans = np.abs(np.asarray(transforms[i][0]) - np.asarray(tf_expected[i][0]))
+                rot = np.abs(np.asarray(transforms[i][1]) - np.asarray(tf_expected[i][1]))
+                assert np.max(trans) < 0.16
+                assert np.max(rot) < 0.1
+
+
diff --git a/ar_track_alvar/test/test_ar_track_alvar_bag.py b/ar_track_alvar/test/test_ar_track_alvar_bag.py
new file mode 100644
index 0000000..d4440bf
--- /dev/null
+++ b/ar_track_alvar/test/test_ar_track_alvar_bag.py
@@ -0,0 +1,61 @@
+import os
+import sys
+import time
+import unittest
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+from launch_ros.actions import Node
+import launch_testing
+import launch
+
+import pytest
+import rclpy
+
+from sensor_msgs.msg import Image
+
+# Test Parameters
+bag_name = os.path.join(os.path.dirname(__file__),'resources','alvar-marker-pose-test.bag')
+
+@pytest.mark.rostest
+def generate_test_description():
+
+    return LaunchDescription([
+
+        # Launch Bag
+        launch.actions.ExecuteProcess(
+            cmd=['ros2', 'bag', 'play','--loop', '-s',"rosbag_v2",bag_name],
+            output='screen'
+        ),
+        launch_testing.actions.ReadyToTest(),
+    ])
+
+class TestArTrack(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        rclpy.init()
+        cls.node = rclpy.create_node('test_ar_track_alvar_node')
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.node.destroy_node()
+        rclpy.shutdown()
+
+    def test_message_received(self):
+        # Expect the bag playback to publish images on the '/camera/image_raw' topic
+        msgs_received = []
+        self.node.create_subscription(
+            Image,
+            '/camera/image_raw',
+            lambda msg: msgs_received.append(msg),
+            1
+        )
+
+        # Wait up to 60 seconds to receive message
+        start_time = time.time()
+        while len(msgs_received) == 0 and (time.time() - start_time) < 60:
+            rclpy.spin_once(self.node, timeout_sec=0.1)
+
+        assert len(msgs_received) > 0
+
diff --git a/ar_track_alvar/test/test_ar_track_alvar_individual_markers.py b/ar_track_alvar/test/test_ar_track_alvar_individual_markers.py
new file mode 100644
index 0000000..bf5f3c7
--- /dev/null
+++ b/ar_track_alvar/test/test_ar_track_alvar_individual_markers.py
@@ -0,0 +1,82 @@
+import os
+import sys
+import time
+import unittest
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+from launch_ros.actions import Node
+import launch_testing
+import launch
+
+import pytest
+import rclpy
+
+from stereo_msgs.msg import DisparityImage
+from sensor_msgs.msg import Image
+from ar_track_alvar_msgs.msg import AlvarMarkers
+
+# Test Parameters
+bag_name = os.path.join(os.path.dirname(__file__),'resources','alvar-marker-pose-test.bag')
+@pytest.mark.rostest
+def generate_test_description():
+
+    return LaunchDescription([
+        # Play the test bag in a loop
+        launch.actions.ExecuteProcess(
+            cmd=['ros2', 'bag', 'play','--loop','-s',"rosbag_v2",bag_name],
+            output='screen'
+        ),
+
+        Node(
+            package='ar_track_alvar',
+            executable='individualMarkersNoKinect',
+            name='individual_markers',
+            remappings=[
+                ("camera_image", "camera/image_raw"),
+                ("camera_info", "camera/camera_info")
+            ],
+            parameters=[
+                {"marker_size":2.3},
+                {"max_new_marker_error":0.08},
+                {"max_track_error":0.2},
+                {"output_frame":"camera"},
+                {"max_frequency":100.0},
+                {"marker_margin":2},
+                {"marker_resolution":5}
+            ],
+            output='screen'
+        ),
+        launch_testing.actions.ReadyToTest(),
+    ])
+
+class TestArTrack(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        rclpy.init()
+        cls.node = rclpy.create_node('test_ar_track_alvar_markers')
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.node.destroy_node()
+        rclpy.shutdown()
+
+    def test_message_received(self):
+        # Expect the marker detector to publish on the 'ar_pose_marker' topic
+        msgs_received = []
+        self.node.create_subscription(
+            AlvarMarkers,
+            "ar_pose_marker",
+            lambda msg: msgs_received.append(msg),
+            1
+        )
+
+        # Wait up to 60 seconds to receive message
+        start_time = time.time()
+        while len(msgs_received) == 0 and (time.time() - start_time) < 60:
+            rclpy.spin_once(self.node, timeout_sec=0.1)
+
+        assert len(msgs_received) > 0
+
diff --git a/ar_track_alvar/test/test_kinect_filtering.cpp b/ar_track_alvar/test/test_kinect_filtering.cpp
deleted file mode 100644
index a7bbf4e..0000000
--- a/ar_track_alvar/test/test_kinect_filtering.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the Willow Garage, Inc. nor the names of its
- *     contributors may be used to endorse or promote products derived from
- *     this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/**
- * \file
- *
- * Test node for kinect filtering
- *
- * \author Bhaskara Marthi
- */
-
-#include <ar_track_alvar/kinect_filtering.h>
-
-namespace a=ar_track_alvar;
-namespace gm=geometry_msgs;
-
-using std::cerr;
-
-// Random float between a and b
-float randFloat (float a, float b)
-{
-  const float u = static_cast<float>(rand())/RAND_MAX;
-  return a + u*(b-a);
-}
-
-// Generate points in a square in space of form p+av+bw where
-// a and b range from 0 to 1
-a::ARCloud::Ptr generateCloud(const double px, const double py, const double pz,
-                              const double vx, const double vy, const double vz,
-                              const double wx, const double wy, const double wz)
-{
-  const double INC=0.1;
-  const double NOISE=0.01;
-
-  a::ARCloud::Ptr cloud(boost::make_shared<a::ARCloud>());
-  for (double u=0; u<1+INC/2; u+=INC)
-  {
-    for (double v=0; v<1+INC/2; v+=INC)
-    {
-      a::ARPoint p;
-      p.x = px+u*vx+v*wx+randFloat(-NOISE, NOISE);
-      p.y = py+u*vy+v*wy+randFloat(-NOISE, NOISE);
-      p.z = pz+u*vz+v*wz+randFloat(-NOISE, NOISE);
-      cloud->points.push_back(p);
-    }
-  }
-  return cloud;
-}
-
-int main (int argc, char** argv)
-{
-  if (argc != 12)
-  {
-    cerr << "Usage: " << argv[0] << " PX PY PZ VX VY VZ WX WY WZ I1 I2 I3\n";
-    return 1;
-  }
-
-  const double px = atof(argv[1]);
-  const double py = atof(argv[2]);
-  const double pz = atof(argv[3]);
-  const double vx = atof(argv[4]);
-  const double vy = atof(argv[5]);
-  const double vz = atof(argv[6]);
-  const double wx = atof(argv[7]);
-  const double wy = atof(argv[8]);
-  const double wz = atof(argv[9]);
-
-  a::ARCloud::ConstPtr cloud =
-    generateCloud(px, py, pz, vx, vy, vz, wx, wy, wz);
-  const size_t n = cloud->size();
-  ROS_INFO("Generated cloud with %zu points such as (%.4f, %.4f, %.4f)"
-           " and (%.4f, %.4f, %.4f)", n, (*cloud)[0].x, (*cloud)[0].y,
-           (*cloud)[0].z, (*cloud)[n-1].x, (*cloud)[n-1].y, (*cloud)[n-1].z);
-
-  const size_t i1 = atoi(argv[10]);
-  const size_t i2 = atoi(argv[11]);
-  const size_t i3 = atoi(argv[12]);
-  a::ARPoint p1 = (*cloud)[i1];
-  a::ARPoint p2 = (*cloud)[i2];
-  a::ARPoint p3 = (*cloud)[i3];
-  ROS_INFO("Points are (%.4f, %.4f, %.4f) and (%.4f, %.4f, %.4f)",
-           p1.x, p1.y, p1.z, p2.x, p2.y, p2.z);
-
-  a::PlaneFitResult res = a::fitPlane(cloud);
-  ROS_INFO("Plane equation is %.3fx + %.3fy + %.3fz + %.3f = 0",
-           res.coeffs.values[0], res.coeffs.values[1], res.coeffs.values[2],
-           res.coeffs.values[3]);
-
-  gm::Quaternion q = a::extractOrientation(res.coeffs, p1, p2, p3, p1);
-  ROS_INFO_STREAM("Orientation is " << q);
-
-
-  return 0;
-}
diff --git a/ar_track_alvar/test/test_launch_files.py b/ar_track_alvar/test/test_launch_files.py
new file mode 100644
index 0000000..2cb996d
--- /dev/null
+++ b/ar_track_alvar/test/test_launch_files.py
@@ -0,0 +1,85 @@
+from rclpy.duration import Duration
+
+import os
+import sys
+import time
+import unittest
+import numpy as np
+
+from ament_index_python.packages import get_package_share_directory
+from launch.actions import IncludeLaunchDescription
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+from
launch import LaunchDescription
+from launch_ros.actions import Node
+import launch_testing
+import launch
+
+import pytest
+import rclpy
+
+pr2_bundle_no_kinect = os.path.join(get_package_share_directory('ar_track_alvar'),'launch','pr2_bundle_no_kinect.py')
+pr2_bundle = os.path.join(get_package_share_directory('ar_track_alvar'),'launch','pr2_bundle.py')
+pr2_indiv_no_kinect = os.path.join(get_package_share_directory('ar_track_alvar'),'launch','pr2_indiv_no_kinect.py')
+pr2_indiv = os.path.join(get_package_share_directory('ar_track_alvar'),'launch','pr2_indiv.py')
+pr2_train = os.path.join(get_package_share_directory('ar_track_alvar'),'launch','pr2_train.py')
+
+bag_name = os.path.join(os.path.dirname(__file__),'resources','alvar-marker-pose-test.bag')
+
+@pytest.mark.rostest
+def generate_test_description():
+    return LaunchDescription([
+
+        # Launch Bag
+        # launch.actions.ExecuteProcess(
+        #     cmd=['ros2', 'bag', 'play','--loop', '-s',"rosbag_v2",bag_name],
+        #     output='screen'
+        # ),
+
+        # IndividualMarkersNoKinect Launch File
+        IncludeLaunchDescription(
+            PythonLaunchDescriptionSource([get_package_share_directory('ar_track_alvar'), '/launch/pr2_indiv_no_kinect.py']),
+        ),
+
+        # IndividualMarkers Launch File
+        IncludeLaunchDescription(
+            PythonLaunchDescriptionSource([get_package_share_directory('ar_track_alvar'), '/launch/pr2_indiv.py']),
+        ),
+
+        # FindMarkerBundles Launch File
+        IncludeLaunchDescription(
+            PythonLaunchDescriptionSource([get_package_share_directory('ar_track_alvar'), '/launch/pr2_bundle.py']),
+        ),
+
+        # FindMarkerBundlesNoKinect Launch File
+        IncludeLaunchDescription(
+            PythonLaunchDescriptionSource([get_package_share_directory('ar_track_alvar'), '/launch/pr2_bundle_no_kinect_testing.py']),
+        ),
+
+        # TrainMarkerBundle Launch File
+        IncludeLaunchDescription(
+            PythonLaunchDescriptionSource([get_package_share_directory('ar_track_alvar'), '/launch/pr2_train.py']),
+        ),
+
+        launch_testing.actions.ReadyToTest(),
+    ])
+
+class TestArTrack(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        rclpy.init()
+        cls.node = rclpy.create_node('test_ar_track_alvar_launch_files')
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.node.destroy_node()
+        rclpy.shutdown()
+
+    def test_launch_files(self):
+        # Spin for up to 5 seconds while the included launch files come up
+        launch_success = True
+        start_time = time.time()
+        while (time.time() - start_time) < 5:
+            rclpy.spin_once(self.node, timeout_sec=0.1)
+
+        assert launch_success
diff --git a/ar_track_alvar/test/test_markerdetect.launch.xml b/ar_track_alvar/test/test_markerdetect.launch.xml
deleted file mode 100644
index 99f2a9c..0000000
--- a/ar_track_alvar/test/test_markerdetect.launch.xml
+++ /dev/null
@@ -1,49 +0,0 @@
[49 deleted lines of launch XML; the markup was lost in extraction]
diff --git a/ar_track_alvar/test/test_points.cpp b/ar_track_alvar/test/test_points.cpp
deleted file mode 100644
index 6e1df52..0000000
--- a/ar_track_alvar/test/test_points.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2008, Willow Garage, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the Willow Garage, Inc. nor the names of its
- *     contributors may be used to endorse or promote products derived from
- *     this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/**
- * \file
- *
- * Test node for kinect filtering
- *
- * \author Bhaskara Marthi
- */
-
-#include <ar_track_alvar/kinect_filtering.h>
-#include <fstream>
-
-namespace a=ar_track_alvar;
-namespace gm=geometry_msgs;
-
-using std::cerr;
-using std::ifstream;
-
-// Random float between a and b
-float randFloat (float a, float b)
-{
-  const float u = static_cast<float>(rand())/RAND_MAX;
-  return a + u*(b-a);
-}
-
-// Generate points in a square in space of form p+av+bw where
-// a and b range from 0 to 1
-a::ARCloud::Ptr generateCloud(const double px, const double py, const double pz,
-                              const double vx, const double vy, const double vz,
-                              const double wx, const double wy, const double wz)
-{
-  const double INC=0.1;
-  const double NOISE=0.01;
-
-  a::ARCloud::Ptr cloud(boost::make_shared<a::ARCloud>());
-  for (double u=0; u<1+INC/2; u+=INC)
-  {
-    for (double v=0; v<1+INC/2; v+=INC)
-    {
-      a::ARPoint p;
-      p.x = px+u*vx+v*wx+randFloat(-NOISE, NOISE);
-      p.y = py+u*vy+v*wy+randFloat(-NOISE, NOISE);
-      p.z = pz+u*vz+v*wz+randFloat(-NOISE, NOISE);
-      cloud->points.push_back(p);
-    }
-  }
-  return cloud;
-}
-
-int main (int argc, char** argv)
-{
-  ros::init(argc, argv, "test_points");
-  ifstream f("points");
-  a::ARCloud::Ptr cloud(new a::ARCloud());
-  while (!f.eof())
-  {
-    a::ARPoint pt;
-    f >> pt.x >> pt.y >> pt.z;
-    cloud->points.push_back(pt);
-  }
-  ROS_INFO("Cloud has %zu points such as (%.2f, %.2f, %.2f)",
-           cloud->points.size(), cloud->points[0].x, cloud->points[0].y,
-           cloud->points[0].z);
-  a::ARPoint p1, p2, p3;
-  p1.x = 0.1888;
-  p1.y = 0.1240;
-  p1.z = 0.8620;
-  p2.x = 0.0372;
-  p2.y = 0.1181;
-  p2.z = 0.8670;
-  p3.x = 42;
-  p3.y = 24;
-  p3.z = 88;
-
-  a::PlaneFitResult res = a::fitPlane(cloud);
-  ROS_INFO("Plane equation is %.3fx + %.3fy + %.3fz + %.3f = 0",
-           res.coeffs.values[0], res.coeffs.values[1], res.coeffs.values[2],
-           res.coeffs.values[3]);
-
-  gm::Quaternion q = a::extractOrientation(res.coeffs, p1, p2, p3, p1);
-  ROS_INFO_STREAM("Orientation is " << q);
-  return 0;
-}
diff --git a/ar_track_alvar_msgs/CMakeLists.txt b/ar_track_alvar_msgs/CMakeLists.txt
index be775f6..50aa8cb 100644
--- a/ar_track_alvar_msgs/CMakeLists.txt
+++ b/ar_track_alvar_msgs/CMakeLists.txt
@@ -1,18 +1,28 @@
-cmake_minimum_required(VERSION 2.8.3)
+cmake_minimum_required(VERSION 3.5)
 project(ar_track_alvar_msgs)
 
-set(MSG_DEPS
-  std_msgs
-  geometry_msgs
-  )
+if(NOT WIN32)
+  if(NOT CMAKE_CXX_STANDARD)
+    set(CMAKE_CXX_STANDARD 14)
+  endif()
+  if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+    add_compile_options(-Wall -Wextra -Wpedantic)
+  endif()
+endif()
 
-find_package(catkin REQUIRED COMPONENTS
-  message_generation
-  ${MSG_DEPS}
-)
+find_package(ament_cmake REQUIRED)
+find_package(std_msgs REQUIRED)
+find_package(geometry_msgs REQUIRED)
+find_package(rosidl_default_generators REQUIRED)
 
-set(MSG_FILES AlvarMarker.msg AlvarMarkers.msg)
-add_message_files(DIRECTORY msg FILES ${MSG_FILES})
-generate_messages(DEPENDENCIES ${MSG_DEPS})
+rosidl_generate_interfaces(
+  ${PROJECT_NAME}
+  msg/AlvarMarkers.msg
+  msg/AlvarMarker.msg
+  DEPENDENCIES
+  std_msgs
+  geometry_msgs
+)
 
-catkin_package(CATKIN_DEPENDS message_runtime ${MSG_DEPS})
+ament_export_dependencies(rosidl_default_runtime)
+ament_package()
diff --git a/ar_track_alvar_msgs/msg/AlvarMarker.msg b/ar_track_alvar_msgs/msg/AlvarMarker.msg
index b1db8c6..e6e3efa 100644
--- a/ar_track_alvar_msgs/msg/AlvarMarker.msg
+++ b/ar_track_alvar_msgs/msg/AlvarMarker.msg
@@ -1,4 +1,4 @@
-Header header
+std_msgs/Header header
 uint32 id
 uint32 confidence
 geometry_msgs/PoseStamped pose
diff --git a/ar_track_alvar_msgs/msg/AlvarMarkers.msg b/ar_track_alvar_msgs/msg/AlvarMarkers.msg
index d820821..29b9fe6 100644
--- a/ar_track_alvar_msgs/msg/AlvarMarkers.msg
+++ b/ar_track_alvar_msgs/msg/AlvarMarkers.msg
@@ -1,2 +1,2 @@
-Header header
+std_msgs/Header header
 AlvarMarker[] markers
diff --git a/ar_track_alvar_msgs/package.xml b/ar_track_alvar_msgs/package.xml
index 22e5810..6d03b07 100644
--- a/ar_track_alvar_msgs/package.xml
+++ b/ar_track_alvar_msgs/package.xml
@@ -1,5 +1,8 @@
-<package>
+<?xml version="1.0"?>
+<package format="3">
 
   <name>ar_track_alvar_msgs</name>
   <version>0.7.1</version>
@@ -10,11 +13,19 @@
   <license>BSD</license>
   <url>http://ros.org/wiki/ar_track_alvar</url>
 
-  <buildtool_depend>catkin</buildtool_depend>
-  <build_depend>message_generation</build_depend>
-  <build_depend>geometry_msgs</build_depend>
-  <build_depend>std_msgs</build_depend>
-  <run_depend>message_runtime</run_depend>
-  <run_depend>geometry_msgs</run_depend>
-  <run_depend>std_msgs</run_depend>
+  <buildtool_depend>ament_cmake</buildtool_depend>
+  <buildtool_depend>rosidl_default_generators</buildtool_depend>
+  <depend>geometry_msgs</depend>
+  <depend>std_msgs</depend>
+
+  <exec_depend>rosidl_default_runtime</exec_depend>
+  <test_depend>ament_cmake_gtest</test_depend>
+  <test_depend>ament_lint_auto</test_depend>
+  <test_depend>ament_lint_common</test_depend>
+
+  <member_of_group>rosidl_interface_packages</member_of_group>
+
+  <export>
+    <build_type>ament_cmake</build_type>
+  </export>
 </package>
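
### Running the Ported Tests

The Python test files added above are `launch_testing`-based, so each one can be run on its own. A minimal sketch, assuming the workspace has already been built and sourced; the `src/ar_track_alvar` prefix below is an assumption about where this repository was cloned in the workspace:

```
# Run a single launch-based test file directly
launch_test src/ar_track_alvar/ar_track_alvar/test/test_ar_track_alvar_bag.py

# Or run the whole suite for both packages through colcon
colcon test --packages-select ar_track_alvar ar_track_alvar_msgs
colcon test-result --verbose
```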