From e4f6d6f06838a3ab30cf93e713d2d1750bb8eefe Mon Sep 17 00:00:00 2001 From: Shafeef Omar Date: Tue, 26 Feb 2019 22:54:46 +0530 Subject: [PATCH] configuration and installation done on OS X 10.13.6 --- .DS_Store | Bin 0 -> 10244 bytes .catkin_tools/CATKIN_IGNORE | 0 .catkin_tools/README | 13 + .catkin_tools/VERSION | 1 + .catkin_tools/profiles/default/config.yaml | 24 + future pkg/.DS_Store | Bin 0 -> 6148 bytes future pkg/image_pipeline/.DS_Store | Bin 0 -> 6148 bytes .../image_pipeline/camera_calibration/.tar | 2 + .../camera_calibration/CHANGELOG.rst | 170 +++ .../camera_calibration/CMakeLists.txt | 31 + .../camera_calibration/button.jpg | Bin 0 -> 21110 bytes .../camera_calibration/doc/conf.py | 201 +++ .../camera_calibration/doc/index.rst | 18 + .../camera_calibration/mainpage.dox | 59 + .../nodes/cameracalibrator.py | 154 +++ .../camera_calibration/nodes/cameracheck.py | 57 + .../camera_calibration/package.xml | 30 + .../camera_calibration/rosdoc.yaml | 4 + .../scripts/tarfile_calibration.py | 235 ++++ .../camera_calibration/setup.py | 9 + .../src/camera_calibration/__init__.py | 0 .../src/camera_calibration/calibrator.py | 1150 +++++++++++++++++ .../camera_calibration/camera_calibrator.py | 352 +++++ .../src/camera_calibration/camera_checker.py | 200 +++ .../camera_calibration/test/directed.py | 274 ++++ .../test/multiple_boards.py | 87 ++ .../image_pipeline/depth_image_proc/.tar | 2 + .../depth_image_proc/CHANGELOG.rst | 122 ++ .../depth_image_proc/CMakeLists.txt | 44 + .../depth_image_proc/depth_conversions.h | 101 ++ .../include/depth_image_proc/depth_traits.h | 71 + .../depth_image_proc/mainpage.dox | 26 + .../depth_image_proc/nodelet_plugins.xml | 73 ++ .../depth_image_proc/package.xml | 46 + .../src/nodelets/convert_metric.cpp | 140 ++ .../src/nodelets/crop_foremost.cpp | 142 ++ .../src/nodelets/disparity.cpp | 189 +++ .../src/nodelets/point_cloud_xyz.cpp | 138 ++ .../src/nodelets/point_cloud_xyz_radial.cpp | 235 ++++ .../src/nodelets/point_cloud_xyzi.cpp | 316 +++++ .../src/nodelets/point_cloud_xyzi_radial.cpp | 313 +++++ .../src/nodelets/point_cloud_xyzrgb.cpp | 345 +++++ .../src/nodelets/register.cpp | 263 ++++ future pkg/image_pipeline/image_pipeline/.tar | 2 + .../image_pipeline/CHANGELOG.rst | 68 + .../image_pipeline/CMakeLists.txt | 4 + .../image_pipeline/image_pipeline/package.xml | 29 + future pkg/image_pipeline/image_proc/.tar | 2 + .../image_pipeline/image_proc/CHANGELOG.rst | 119 ++ .../image_pipeline/image_proc/CMakeLists.txt | 68 + .../image_proc/cfg/CropDecimate.cfg | 35 + .../image_pipeline/image_proc/cfg/Debayer.cfg | 26 + .../image_pipeline/image_proc/cfg/Rectify.cfg | 22 + .../image_pipeline/image_proc/cfg/Resize.cfg | 37 + .../image_proc/advertisement_checker.h | 61 + .../image_proc/include/image_proc/processor.h | 77 ++ .../image_proc/launch/image_proc.launch | 29 + .../image_pipeline/image_proc/mainpage.dox | 12 + .../image_proc/nodelet_plugins.xml | 48 + .../image_pipeline/image_proc/package.xml | 39 + .../libimage_proc/advertisement_checker.cpp | 92 ++ .../src/libimage_proc/processor.cpp | 130 ++ .../image_proc/src/nodelets/crop_decimate.cpp | 353 +++++ .../image_proc/src/nodelets/crop_non_zero.cpp | 146 +++ .../image_proc/src/nodelets/debayer.cpp | 276 ++++ .../image_proc/src/nodelets/edge_aware.cpp | 804 ++++++++++++ .../image_proc/src/nodelets/edge_aware.h | 49 + .../image_proc/src/nodelets/rectify.cpp | 170 +++ .../image_proc/src/nodelets/resize.cpp | 174 +++ .../image_proc/src/nodes/image_proc.cpp | 97 ++ .../image_proc/test/CMakeLists.txt | 5 + 
.../image_proc/test/rostest.cpp | 99 ++ .../image_proc/test/test_bayer.xml | 10 + .../image_proc/test/test_rectify.cpp | 205 +++ .../image_proc/test/test_rectify.xml | 9 + .../image_pipeline/image_publisher/.DS_Store | Bin 0 -> 6148 bytes .../image_pipeline/image_publisher/.tar | 2 + .../image_publisher/CHANGELOG.rst | 63 + .../image_publisher/CMakeLists.txt | 39 + .../image_publisher/cfg/ImagePublisher.cfg | 44 + .../image_publisher/nodelet_plugins.xml | 7 + .../image_publisher/package.xml | 35 + .../image_publisher/src/.DS_Store | Bin 0 -> 6148 bytes .../src/node/image_publisher.cpp | 57 + .../src/nodelet/image_publisher_nodelet.cpp | 196 +++ future pkg/image_pipeline/image_rotate/.tar | 2 + .../image_pipeline/image_rotate/CHANGELOG.rst | 93 ++ .../image_rotate/CMakeLists.txt | 32 + .../image_rotate/cfg/ImageRotate.cfg | 59 + .../image_pipeline/image_rotate/mainpage.dox | 26 + .../image_rotate/nodelet_plugins.xml | 7 + .../image_pipeline/image_rotate/package.xml | 55 + .../image_rotate/src/node/image_rotate.cpp | 54 + .../src/nodelet/image_rotate_nodelet.cpp | 281 ++++ future pkg/image_pipeline/image_view/.tar | 2 + .../image_pipeline/image_view/CHANGELOG.rst | 188 +++ .../image_pipeline/image_view/CMakeLists.txt | 90 ++ .../image_view/cfg/ImageView.cfg | 29 + .../image_pipeline/image_view/mainpage.dox | 11 + .../image_view/nodelet_plugins.xml | 11 + .../image_pipeline/image_view/package.xml | 46 + .../image_pipeline/image_view/rosdoc.yaml | 4 + .../image_view/scripts/extract_images_sync | 99 ++ .../src/nodelets/disparity_nodelet.cpp | 439 +++++++ .../image_view/src/nodelets/image_nodelet.cpp | 228 ++++ .../image_view/src/nodelets/window_thread.cpp | 52 + .../image_view/src/nodelets/window_thread.h | 44 + .../image_view/src/nodes/disparity_view.cpp | 54 + .../image_view/src/nodes/extract_images.cpp | 154 +++ .../image_view/src/nodes/image_saver.cpp | 226 ++++ .../image_view/src/nodes/image_view.cpp | 212 +++ .../image_view/src/nodes/stereo_view.cpp | 573 ++++++++ .../image_view/src/nodes/video_recorder.cpp | 141 ++ .../image_pipeline/stereo_image_proc/.tar | 2 + .../stereo_image_proc/CHANGELOG.rst | 102 ++ .../stereo_image_proc/CMakeLists.txt | 57 + .../stereo_image_proc/cfg/Disparity.cfg | 39 + .../stereo_image_proc/doc/mainpage.dox | 14 + .../stereo_image_proc/doc/stereo_frames.svg | 1052 +++++++++++++++ .../include/stereo_image_proc/processor.h | 299 +++++ .../launch/stereo_image_proc.launch | 36 + .../stereo_image_proc/nodelet_plugins.xml | 11 + .../stereo_image_proc/package.xml | 40 + .../stereo_image_proc/rosdoc.yaml | 4 + .../src/libstereo_image_proc/processor.cpp | 317 +++++ .../src/nodelets/disparity.cpp | 250 ++++ .../src/nodelets/point_cloud2.cpp | 272 ++++ .../src/nodes/stereo_image_proc.cpp | 149 +++ install | 256 ++-- 129 files changed, 15362 insertions(+), 127 deletions(-) create mode 100644 .DS_Store create mode 100644 .catkin_tools/CATKIN_IGNORE create mode 100644 .catkin_tools/README create mode 100644 .catkin_tools/VERSION create mode 100644 .catkin_tools/profiles/default/config.yaml create mode 100644 future pkg/.DS_Store create mode 100644 future pkg/image_pipeline/.DS_Store create mode 100644 future pkg/image_pipeline/camera_calibration/.tar create mode 100644 future pkg/image_pipeline/camera_calibration/CHANGELOG.rst create mode 100644 future pkg/image_pipeline/camera_calibration/CMakeLists.txt create mode 100644 future pkg/image_pipeline/camera_calibration/button.jpg create mode 100644 future pkg/image_pipeline/camera_calibration/doc/conf.py create mode 100644 
future pkg/image_pipeline/camera_calibration/doc/index.rst create mode 100644 future pkg/image_pipeline/camera_calibration/mainpage.dox create mode 100755 future pkg/image_pipeline/camera_calibration/nodes/cameracalibrator.py create mode 100755 future pkg/image_pipeline/camera_calibration/nodes/cameracheck.py create mode 100644 future pkg/image_pipeline/camera_calibration/package.xml create mode 100644 future pkg/image_pipeline/camera_calibration/rosdoc.yaml create mode 100755 future pkg/image_pipeline/camera_calibration/scripts/tarfile_calibration.py create mode 100644 future pkg/image_pipeline/camera_calibration/setup.py create mode 100644 future pkg/image_pipeline/camera_calibration/src/camera_calibration/__init__.py create mode 100644 future pkg/image_pipeline/camera_calibration/src/camera_calibration/calibrator.py create mode 100755 future pkg/image_pipeline/camera_calibration/src/camera_calibration/camera_calibrator.py create mode 100755 future pkg/image_pipeline/camera_calibration/src/camera_calibration/camera_checker.py create mode 100644 future pkg/image_pipeline/camera_calibration/test/directed.py create mode 100644 future pkg/image_pipeline/camera_calibration/test/multiple_boards.py create mode 100644 future pkg/image_pipeline/depth_image_proc/.tar create mode 100644 future pkg/image_pipeline/depth_image_proc/CHANGELOG.rst create mode 100644 future pkg/image_pipeline/depth_image_proc/CMakeLists.txt create mode 100644 future pkg/image_pipeline/depth_image_proc/include/depth_image_proc/depth_conversions.h create mode 100644 future pkg/image_pipeline/depth_image_proc/include/depth_image_proc/depth_traits.h create mode 100644 future pkg/image_pipeline/depth_image_proc/mainpage.dox create mode 100644 future pkg/image_pipeline/depth_image_proc/nodelet_plugins.xml create mode 100644 future pkg/image_pipeline/depth_image_proc/package.xml create mode 100644 future pkg/image_pipeline/depth_image_proc/src/nodelets/convert_metric.cpp create mode 100755 future pkg/image_pipeline/depth_image_proc/src/nodelets/crop_foremost.cpp create mode 100644 future pkg/image_pipeline/depth_image_proc/src/nodelets/disparity.cpp create mode 100644 future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyz.cpp create mode 100644 future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyz_radial.cpp create mode 100644 future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzi.cpp create mode 100644 future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzi_radial.cpp create mode 100644 future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzrgb.cpp create mode 100644 future pkg/image_pipeline/depth_image_proc/src/nodelets/register.cpp create mode 100644 future pkg/image_pipeline/image_pipeline/.tar create mode 100644 future pkg/image_pipeline/image_pipeline/CHANGELOG.rst create mode 100644 future pkg/image_pipeline/image_pipeline/CMakeLists.txt create mode 100644 future pkg/image_pipeline/image_pipeline/package.xml create mode 100644 future pkg/image_pipeline/image_proc/.tar create mode 100644 future pkg/image_pipeline/image_proc/CHANGELOG.rst create mode 100644 future pkg/image_pipeline/image_proc/CMakeLists.txt create mode 100755 future pkg/image_pipeline/image_proc/cfg/CropDecimate.cfg create mode 100755 future pkg/image_pipeline/image_proc/cfg/Debayer.cfg create mode 100755 future pkg/image_pipeline/image_proc/cfg/Rectify.cfg create mode 100755 future pkg/image_pipeline/image_proc/cfg/Resize.cfg create mode 100644 future 
pkg/image_pipeline/image_proc/include/image_proc/advertisement_checker.h create mode 100644 future pkg/image_pipeline/image_proc/include/image_proc/processor.h create mode 100644 future pkg/image_pipeline/image_proc/launch/image_proc.launch create mode 100644 future pkg/image_pipeline/image_proc/mainpage.dox create mode 100644 future pkg/image_pipeline/image_proc/nodelet_plugins.xml create mode 100644 future pkg/image_pipeline/image_proc/package.xml create mode 100644 future pkg/image_pipeline/image_proc/src/libimage_proc/advertisement_checker.cpp create mode 100644 future pkg/image_pipeline/image_proc/src/libimage_proc/processor.cpp create mode 100644 future pkg/image_pipeline/image_proc/src/nodelets/crop_decimate.cpp create mode 100644 future pkg/image_pipeline/image_proc/src/nodelets/crop_non_zero.cpp create mode 100644 future pkg/image_pipeline/image_proc/src/nodelets/debayer.cpp create mode 100644 future pkg/image_pipeline/image_proc/src/nodelets/edge_aware.cpp create mode 100644 future pkg/image_pipeline/image_proc/src/nodelets/edge_aware.h create mode 100644 future pkg/image_pipeline/image_proc/src/nodelets/rectify.cpp create mode 100644 future pkg/image_pipeline/image_proc/src/nodelets/resize.cpp create mode 100644 future pkg/image_pipeline/image_proc/src/nodes/image_proc.cpp create mode 100644 future pkg/image_pipeline/image_proc/test/CMakeLists.txt create mode 100644 future pkg/image_pipeline/image_proc/test/rostest.cpp create mode 100644 future pkg/image_pipeline/image_proc/test/test_bayer.xml create mode 100644 future pkg/image_pipeline/image_proc/test/test_rectify.cpp create mode 100644 future pkg/image_pipeline/image_proc/test/test_rectify.xml create mode 100644 future pkg/image_pipeline/image_publisher/.DS_Store create mode 100644 future pkg/image_pipeline/image_publisher/.tar create mode 100644 future pkg/image_pipeline/image_publisher/CHANGELOG.rst create mode 100644 future pkg/image_pipeline/image_publisher/CMakeLists.txt create mode 100755 future pkg/image_pipeline/image_publisher/cfg/ImagePublisher.cfg create mode 100644 future pkg/image_pipeline/image_publisher/nodelet_plugins.xml create mode 100644 future pkg/image_pipeline/image_publisher/package.xml create mode 100644 future pkg/image_pipeline/image_publisher/src/.DS_Store create mode 100644 future pkg/image_pipeline/image_publisher/src/node/image_publisher.cpp create mode 100644 future pkg/image_pipeline/image_publisher/src/nodelet/image_publisher_nodelet.cpp create mode 100644 future pkg/image_pipeline/image_rotate/.tar create mode 100644 future pkg/image_pipeline/image_rotate/CHANGELOG.rst create mode 100644 future pkg/image_pipeline/image_rotate/CMakeLists.txt create mode 100755 future pkg/image_pipeline/image_rotate/cfg/ImageRotate.cfg create mode 100644 future pkg/image_pipeline/image_rotate/mainpage.dox create mode 100644 future pkg/image_pipeline/image_rotate/nodelet_plugins.xml create mode 100644 future pkg/image_pipeline/image_rotate/package.xml create mode 100644 future pkg/image_pipeline/image_rotate/src/node/image_rotate.cpp create mode 100644 future pkg/image_pipeline/image_rotate/src/nodelet/image_rotate_nodelet.cpp create mode 100644 future pkg/image_pipeline/image_view/.tar create mode 100644 future pkg/image_pipeline/image_view/CHANGELOG.rst create mode 100644 future pkg/image_pipeline/image_view/CMakeLists.txt create mode 100755 future pkg/image_pipeline/image_view/cfg/ImageView.cfg create mode 100644 future pkg/image_pipeline/image_view/mainpage.dox create mode 100644 future 
pkg/image_pipeline/image_view/nodelet_plugins.xml create mode 100644 future pkg/image_pipeline/image_view/package.xml create mode 100644 future pkg/image_pipeline/image_view/rosdoc.yaml create mode 100755 future pkg/image_pipeline/image_view/scripts/extract_images_sync create mode 100644 future pkg/image_pipeline/image_view/src/nodelets/disparity_nodelet.cpp create mode 100644 future pkg/image_pipeline/image_view/src/nodelets/image_nodelet.cpp create mode 100644 future pkg/image_pipeline/image_view/src/nodelets/window_thread.cpp create mode 100644 future pkg/image_pipeline/image_view/src/nodelets/window_thread.h create mode 100644 future pkg/image_pipeline/image_view/src/nodes/disparity_view.cpp create mode 100644 future pkg/image_pipeline/image_view/src/nodes/extract_images.cpp create mode 100644 future pkg/image_pipeline/image_view/src/nodes/image_saver.cpp create mode 100644 future pkg/image_pipeline/image_view/src/nodes/image_view.cpp create mode 100644 future pkg/image_pipeline/image_view/src/nodes/stereo_view.cpp create mode 100644 future pkg/image_pipeline/image_view/src/nodes/video_recorder.cpp create mode 100644 future pkg/image_pipeline/stereo_image_proc/.tar create mode 100644 future pkg/image_pipeline/stereo_image_proc/CHANGELOG.rst create mode 100644 future pkg/image_pipeline/stereo_image_proc/CMakeLists.txt create mode 100755 future pkg/image_pipeline/stereo_image_proc/cfg/Disparity.cfg create mode 100644 future pkg/image_pipeline/stereo_image_proc/doc/mainpage.dox create mode 100644 future pkg/image_pipeline/stereo_image_proc/doc/stereo_frames.svg create mode 100644 future pkg/image_pipeline/stereo_image_proc/include/stereo_image_proc/processor.h create mode 100644 future pkg/image_pipeline/stereo_image_proc/launch/stereo_image_proc.launch create mode 100644 future pkg/image_pipeline/stereo_image_proc/nodelet_plugins.xml create mode 100644 future pkg/image_pipeline/stereo_image_proc/package.xml create mode 100644 future pkg/image_pipeline/stereo_image_proc/rosdoc.yaml create mode 100644 future pkg/image_pipeline/stereo_image_proc/src/libstereo_image_proc/processor.cpp create mode 100644 future pkg/image_pipeline/stereo_image_proc/src/nodelets/disparity.cpp create mode 100644 future pkg/image_pipeline/stereo_image_proc/src/nodelets/point_cloud2.cpp create mode 100644 future pkg/image_pipeline/stereo_image_proc/src/nodes/stereo_image_proc.cpp diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..1f6035a7cfae3d90b83be054d91269d9e5c2612f GIT binary patch literal 10244 zcmeHMYitx%6h5aEm}y5kg+8Ws!NmeTsL~e@L}a@yj|$q#w$NH&ncbPTBeOH>%bIue1Fq%=C0m1;l(Z#4JP&Y$k^X!_|N_Q-HxqO(PLmE8 zfd>K)1Re-H5O^T)z=hxedS|o7m3uVk0}liq2t1JY0E-V{x)_c4bi$)<=%CIm0f?3o z+if&4Zm#Lk&GuqMj=HB?kJH1A6K-AMxcUJnE?fx+{|%e`WM56!Z%Z z47=ih5swCa;DNvc(>=gy_fm+{-zeBc_4|67JNk#)T*bUxV$@Fib^hr@TGwo+sp&H; znNvD<-h7TLKT%|vcOkmFa#NJ=(0#B3~AU8!&7xibN&gKAt{SR}ebRra1^^^T=Urs^ESHMhl5Rl~K{Jv3yiPJdjiu#=Xl>piBeIhtt< z6wD6nwh8TBj+*K+hD=&9OX%)SGckLoYS~1m>Gu!FmL(g*YIJOmE+p-QOPmDOx5p{sqPn&ONM$aYdS6hh^#6vzxm8`4yZuf#^a*N{`Ht0mP* z^_!Ay5AZ|$2tURz@q7FMfAoG`9{St;U+WY6l?Jmx+K*@@56led(B>^$YlIswTu6W6 zMcgWiw&CShUDLAm`u02yiVl7XFbmP>g3v>mi@qGXJj6?Nm)3_C3zyCIob(Z1)dL)R zM8x@%^>vp@OD_+FM20-hy)9o666m?$-J7dd&|C@4Ftfs{P%}^H7w5RQp*CR+M>rSf zdb8_<^*lWZVyQQKgAm~e@ZvnzP;{{%h`{sz0CL}jPvAR3?e7SL)pA@!XkAK3U4`o? 
[... base85-encoded GIT binary patch data for .DS_Store omitted ...]
+  Without this commit, specifying a smaller column than row size led to
+  huge reported errors:
+  ```
+  $ rosrun camera_calibration cameracheck.py --size 6x7 --square 0.0495
+  Linearity RMS Error: 13.545 Pixels Reprojection RMS Error: 22.766 Pixels
+  $ rosrun camera_calibration cameracheck.py --size 7x6 --square 0.0495
+  Linearity RMS Error: 0.092 Pixels Reprojection RMS Error: 0.083 Pixels
+  ```
+  This commit switches columns and rows around if necessary.
+* Contributors: Martin Günther
+
+1.12.22 (2017-12-08)
+--------------------
+* Changed flag CV_LOAD_IMAGE_COLOR to IMREAD_COLOR to adapt to OpenCV 3. (`#252 `_)
+* Fixed a stereo calibration problem with chessboards having the same number of rows and cols by rotating the corners to the same direction.
+* Contributors: jbosch
+
+1.12.21 (2017-11-05)
+--------------------
+* re-add the calibration nodes but now using the Python modules.
+  Fixes `#298 `_
+* Move nodes to Python module.
+* Contributors: Vincent Rabaud
+
+1.12.20 (2017-04-30)
+--------------------
+* properly save bytes buffer as such
+  This is useful for Python 3 and fixes `#256 `_.
+* Get tests slightly looser.
+  OpenCV 3.2 gives slightly different results apparently.
+* Use floor division where necessary. (`#247 `_)
+* Fix and Improve Camera Calibration Checker Node (`#254 `_)
+  * Fix according to calibrator.py API
+  * Add approximate to cameracheck
+* Force first corner of chessboard to be upper left.
+  Fixes `#140 `_
+* fix doc jobs
+  This is a proper fix for `#233 `_
+* During stereo calibration check that the number of corners detected in the left and right images are the same.
This fixes `ros-perception/image_pipeline#225 `_ +* Contributors: Leonard Gerard, Martin Peris, Vincent Rabaud, hgaiser + +1.12.19 (2016-07-24) +-------------------- +* Fix array check in camerachecky.py + This closes `#205 `_ +* Contributors: Vincent Rabaud + +1.12.18 (2016-07-12) +-------------------- + +1.12.17 (2016-07-11) +-------------------- +* fix typo np -> numpy +* fix failing tests +* Contributors: Shingo Kitagawa, Vincent Rabaud + +1.12.16 (2016-03-19) +-------------------- +* clean OpenCV dependency in package.xml +* Contributors: Vincent Rabaud + +1.12.15 (2016-01-17) +-------------------- +* better 16 handling in mkgray + This re-uses `#150 `_ and therefore closes `#150 `_ +* fix OpenCV2 compatibility +* fix tests with OpenCV3 +* [Calibrator]: add yaml file with calibration data in output +* Contributors: Vincent Rabaud, sambrose + +1.12.14 (2015-07-22) +-------------------- +* remove camera_hammer and install Python nodes properly + camera_hammer was just a test for camera info, nothing to do with + calibration. Plus the test was basic. +* Correct three errors that prevented the node to work properly. +* Contributors: Filippo Basso, Vincent Rabaud + +1.12.13 (2015-04-06) +-------------------- +* replace Queue by deque of fixed size for simplicity + That is a potential fix for `#112 `_ +* Contributors: Vincent Rabaud + +1.12.12 (2014-12-31) +-------------------- +* try to improve `#112 `_ +* Contributors: Vincent Rabaud + +1.12.11 (2014-10-26) +-------------------- + +1.12.10 (2014-09-28) +-------------------- +* Update calibrator.py + bugfix: stereo calibrator crashed after the signature of the method for the computation of the epipolar error changed but the function call was not updated +* Contributors: Volker Grabe + +1.12.9 (2014-09-21) +------------------- +* fix bad Python +* only analyze the latest image + fixes `#97 `_ +* flips width and height during resize to give correct aspect ratio +* Contributors: Russell Toris, Vincent Rabaud + +1.12.8 (2014-08-19) +------------------- +* install scripts in the local bin (they are now rosrun-able again) + fixes `#93 `_ +* fix default Constructor for OpenCV flags + this does not change anything in practice as the flag is set by the node. + It just fixes the test. +* Contributors: Vincent Rabaud + +1.12.6 (2014-07-27) +------------------- +* make sure the GUI is started in its processing thread and fix a typo + This fully fixes `#85 `_ +* fix bad call to save an image +* have display be in its own thread + that could be a fix for `#85 `_ +* fix bad usage of Numpy + fixes `#89 `_ +* fix asymmetric circle calibration + fixes `#35 `_ +* add more tests +* improve unittests to include all patterns +* install Python scripts properly + and fixes `#86 `_ +* fix typo that leads to segfault + fixes `#84 `_ +* also print self.report() on calibrate ... allows to use the params without having to commit them (e.g. for extrensic calibration between to cameras not used as stereo pair) +* fixes `#76 `_ + Move Python approximate time synchronizer to ros_comm +* remove all trace of cv in Python (use cv2) +* remove deprecated file (as mentioned in its help) +* fixes `#25 `_ + This is just removing deprecated options that were around since diamondback +* fixes `#74 `_ + calibrator.py is now using the cv2 only API when using cv_bridge. + The API got changed too but it seems to only be used internally. 
+* Contributors: Vincent Rabaud, ahb + +1.12.5 (2014-05-11) +------------------- +* Fix `#68 `_: StringIO issues in calibrator.py +* fix architecture independent +* Contributors: Miquel Massot, Vincent Rabaud + +1.12.4 (2014-04-28) +------------------- + +1.12.3 (2014-04-12) +------------------- +* camera_calibration: Fix Python import order +* Contributors: Scott K Logan + +1.12.2 (2014-04-08) +------------------- +* Fixes a typo on stereo camera info service calls + Script works after correcting the call names. +* Contributors: JoonasMelin + +1.11.4 (2013-11-23 13:10:55 +0100) +---------------------------------- +- add visualization during calibration and several calibration flags (#48) diff --git a/future pkg/image_pipeline/camera_calibration/CMakeLists.txt b/future pkg/image_pipeline/camera_calibration/CMakeLists.txt new file mode 100644 index 0000000..469d3b8 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/CMakeLists.txt @@ -0,0 +1,31 @@ +cmake_minimum_required(VERSION 2.8) +project(camera_calibration) + +find_package(catkin REQUIRED) +catkin_package() + +catkin_python_setup() + +if(CATKIN_ENABLE_TESTING) + # Unit test of calibrator.py + catkin_add_nosetests(test/directed.py) + + # Tests simple calibration dataset + catkin_download_test_data(camera_calibration.tar.gz http://download.ros.org/data/camera_calibration/camera_calibration.tar.gz + DESTINATION ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_SHARE_DESTINATION}/tests + MD5 6da43ea314640a4c15dd7a90cbc3aee0 + ) + + # Tests multiple checkerboards + catkin_download_test_data(multi_board_calibration.tar.gz http://download.ros.org/data/camera_calibration/multi_board_calibration.tar.gz + DESTINATION ${CATKIN_DEVEL_PREFIX}/${CATKIN_PACKAGE_SHARE_DESTINATION}/tests + MD5 ddc0f69582d140e33f9d3bfb681956bb + ) + catkin_add_nosetests(test/multiple_boards.py) +endif() + +catkin_install_python(PROGRAMS nodes/cameracalibrator.py + nodes/cameracheck.py + scripts/tarfile_calibration.py + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) diff --git a/future pkg/image_pipeline/camera_calibration/button.jpg b/future pkg/image_pipeline/camera_calibration/button.jpg new file mode 100644 index 0000000000000000000000000000000000000000..494a6f6fc5247fdb882536643408f1c5abf7c8a1 GIT binary patch literal 21110 zcmeIaby!qg*FS!S?gph>I%Z(#kZ$P)0bzil8M;JJQCg5t1eETO1{IVJ1(hyAK@kw7 zQ{j6CSer*64Ltgp7gSgn^5_Tt^c2+j=toPU%Y*rob^>XV+#h)rgDpb(ldcA7vfLlC zXqAxsA2dh;7(Zz+I{@=+oF1u85B6O|GX zM)%Y|cz`YLPYL{=?;pA>e*XfA|NjB56)FO^u)&7#CW#Yo4@#7M`$z``NO#lptV&cMJe&ciPZ6%`d_=8}|^ zfXN7oh{DiHAo%$BM1(}=iHXm{SQ%Jh|Lf=UIY5pF&R0832pfPw4#6acoOS}tU^%hT zD;)$~ld}hcfr*8UgNuhxKnMcr$p8!pCME_JCN?$}*a(m?Fde`m$EIL~D&SD++u^c# zQ^6vVa`4y{AHSeBc(cJFas?HMPe60-JS`pP1ukwLUQxK1xP+vXlCp}bn!1Ljp^>qP zshPQjy#vD0$rr0}Bfi3m0842u1)HFv+p7S)n)-3i`Nq-jr;x2s|pqq@2eu@YzKSHmI+l-Vo4m zh`#6CL|5&sX8&2mBL7b{`(3fW>NN!rVM4&p!z2gffqh=?+-n5f#g&4O>TdVRl+lrV zamKLVd(gb;7SeBSDYLc`NNrqSarKhHE3)vqx-YoaDk#26*Ec=_F;b^MO;efwJArG} zS_45Wo>mU}wo+2O+=8qvx3#ch*_du#a!FEY{P$fd-ThzG6qEeGWPWRtdP;j%8r3=dQ_i7Llojgzw%&8T7$- z2Xv*=lLj^5Sr$(-1cumQFURQjy~b>k3gmCE&+#}$yPmVX{^{GwU8~EQ0kWOt;qTy% z?*=4#%T^sQb}CJIWGA=kz5U9h2X*f2TfOU?&TC55J?^f{*~FU1YP%_DzspV}PiN9# zyk=Y&;jNgNS4&{lzhnLWLi`gX9mCw`+T^_Ztez|K6@F8yk4Y*#%vJmDMzhNrodP$5 z!k)dH3hT|F=^&ZCy#s%mo~_kUK0##r!R*!wUuo8*Fs7w3M00YNpvE%iLG~>bC08HD zkL3a64EZtV=`!kM^>SmlflAA+036p?SvcvOC-mfcfgikmWq5tluOocu{59rl7cwaQ zO(kbht7{g~(WL9POcs!}u~=!h`6PX0n&C?Ni8cY9DQER1zC0=F-4tT>b^@+<+%F#Q zc6mwnZ8Kw75>nd z-t?HMp53w1F4eYl?luw-~ikJ5Vhye+@{`6#s^k2>y+d$wnjxK}uxd)qBP=_j$p zY|-LV4U?#OY`mQ^2)e%`bP(a)oA3 
[... base85-encoded GIT binary patch data for button.jpg omitted ...]
zo-Z7F5v&cDyFE^SolQErd-=W%kI%A)Kt0>L&PPHYzjH{E@vy2>s`Ib<)okSNdL$d= zB#=kyEz+#jW+K#@`tGdD4PURivUPnqYBjW^;E8t}%##02jixg_kEQ8brC>iSGNipN z;H=+9^E}l~=Y+M+y|8_CPpFsWw;Av-nQ9cwArleip<4QTHzwVz`Q$~2sWv1pQQYLcjau$ZA?xh0CXDcQK88J zTzzw`M>Q8E7)b5rU92^J-içZyBLlB)R_IlzzjXjC*HYqEgQn7_P%S{Xj!&YiY z!A0`kkG!`z7~+*FNK`vr?=wBQN^O1#9u(dZCMA<`Xu{bSaL}GxXiU5mz}Y!9!dHLm zvcW|+9oxq|8d~nntlbZ^6t^Vzx~bL@?l$yHe7)YhVY)TwXYD{*Yb!S4eobOJ{7U$o zyU(i@Vdm0W|MY@_=J#4QB8EWwdwpLn;G8!2P~DBp|a_;=+En zVq`<0)Zj84y6fbJ#K;V>yMIx(LBB1cwMlk**)ruEPeyPEr%J(0nw^rh3ERrd_d6QW z3Ut@6QL`DJACz&9e;jfcT74@(GmXlxnB$3Y{PVk9bqBoa_m0c4Nk{EiPSoNpg)m>M zKCh)&%Y2e`>-ny3Uqt28r>EXswX{KO@mo}wgvb4W!-J|PoPljJ> zUKt8*RJ@GfmFie7de&(9IOYXbl<-WLhla%=1FKFddsD*R^ZfzC$qFM}R(q^K!YHoR z@0|Ib*UU~@R%K)doK71zN&#MsbNWH&p!v7@bnS{qh2RLOTY%J1YyM=bFSw%UyG+Kdyp3e`rq9 zmkJfQA$1c2&p=_3*(-=i?Gq_Wf&v4h=m_ha$8|Bw;12^jdpt%v(fDSDeYe|9j2?y4 zVqo3EbA0|NhSqaT<}WMu+e_=9E2aE-zvH( zcHJno#D%!i>h?`0BQ>VPdEA7cHT})!T#0sMS?xQSclXkCxlVxuneedMOd{>eFkyR4 zuS;S>tsRWV0dsBFL|-X_16ttgB@c6tMPXeZzWVGf0>v1`Fj#J~F4Ih)2E#@&oR34N zSuo^_`R(oXcVP_^dn2UzJ;gceBvKFZzh;fga)tG>pP$Ax*mjb`v6Q{l7cI>a^T3qO z!lbU{TCO@vveB05kT;XME_-BH6BZUSvRLG6e9wx{@$y(%i|XwFtee36xA^A=2h4Yx z1ag+`UB#{v1vU+eTjEtu>7~=fP55j?EH+V4_bpuSabDY;Ui@tKP~w0oTvlB}c4&FQ zKqt+@uCA`1Vj2lx+ejP8CMSKzJq1LEZ7Ti- ItU8_if8pd9>Hq)$ literal 0 HcmV?d00001 diff --git a/future pkg/image_pipeline/camera_calibration/doc/conf.py b/future pkg/image_pipeline/camera_calibration/doc/conf.py new file mode 100644 index 0000000..82ad7fe --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/doc/conf.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +# +# camera_calibration documentation build configuration file, created by +# sphinx-quickstart on Mon Jun 1 14:21:53 2009. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.append(os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'camera_calibration' +copyright = '2009, Willow Garage, Inc.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.1' +# The full version, including alpha/beta/rc tags. +release = '0.1.0' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. 
+#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +#html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. 
+#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'camera_calibrationdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'camera_calibration.tex', 'stereo\\_utils Documentation', + 'James Bowman', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + 'http://docs.python.org/': None, + 'http://docs.scipy.org/doc/numpy' : None, + 'http://www.ros.org/doc/api/tf/html/python/' : None + } diff --git a/future pkg/image_pipeline/camera_calibration/doc/index.rst b/future pkg/image_pipeline/camera_calibration/doc/index.rst new file mode 100644 index 0000000..741e4e5 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/doc/index.rst @@ -0,0 +1,18 @@ +camera_calibration +================== + +The camera_calibration package contains a user-friendly calibration tool, +cameracalibrator. This tool uses the following Python classes, which +conveniently hide some of the complexities of using OpenCV's calibration +process and chessboard detection, and the details of constructing a ROS +CameraInfo message. These classes are documented here for people who +need to extend or make a new calibration tool. + +For details on the camera model and camera calibration process, see +http://docs.opencv.org/master/d9/d0c/group__calib3d.html + +.. autoclass:: camera_calibration.calibrator.MonoCalibrator + :members: + +.. autoclass:: camera_calibration.calibrator.StereoCalibrator + :members: diff --git a/future pkg/image_pipeline/camera_calibration/mainpage.dox b/future pkg/image_pipeline/camera_calibration/mainpage.dox new file mode 100644 index 0000000..dbcfe69 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/mainpage.dox @@ -0,0 +1,59 @@ +/** +\mainpage +\htmlinclude manifest.html + +\b The camera_calibration package contains tools for calibrating monocular and stereo cameras. + +\section codeapi Code API + +camera_calibration does not have a code API. + +\section rosapi ROS API + +List of nodes: +- \b calibrationnode + + + +
+
+\subsection node_name calibrationnode
+
+calibrationnode subscribes to ROS raw image topics, and presents a
+calibration window. It can run in both monocular and stereo modes.
+The calibration window shows the current images from the cameras,
+highlighting the checkerboard. When the user presses the "CALIBRATE"
+button, the node computes the camera calibration parameters. When the
+user clicks "UPLOAD", the node uploads these new calibration parameters
+to the camera driver using a service call.
+
+\subsubsection Usage
+\verbatim
+$ rosrun camera_calibration cameracalibrator.py [standard ROS args]
+\endverbatim
+
+\par Example
+
+\verbatim
+$ rosrun camera_calibration cameracalibrator.py right:=/my_stereo/right/image_raw left:=/my_stereo/left/image_raw left_camera:=/my_stereo/left right_camera:=/my_stereo/right
+\endverbatim
+
+
+\subsubsection topics ROS topics
+
+Subscribes to:
+- \b "left": [sensor_msgs/Image] left raw image topic, for stereo cameras
+- \b "right": [sensor_msgs/Image] right raw image topic, for stereo cameras
+- \b "image": [sensor_msgs/Image] raw image topic, for monocular cameras
+
+Makes service calls to:
+
+\subsubsection services ROS services
+- \b "camera/set_camera_info": [sensor_msgs/SetCameraInfo] service of the camera driver, for monocular cameras
+- \b "left_camera/set_camera_info": [sensor_msgs/SetCameraInfo] service of the left stereo camera driver
+- \b "right_camera/set_camera_info": [sensor_msgs/SetCameraInfo] service of the right stereo camera driver
+
+
+
+*/
diff --git a/future pkg/image_pipeline/camera_calibration/nodes/cameracalibrator.py b/future pkg/image_pipeline/camera_calibration/nodes/cameracalibrator.py
new file mode 100755
index 0000000..206962e
--- /dev/null
+++ b/future pkg/image_pipeline/camera_calibration/nodes/cameracalibrator.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+#
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2009, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#  * Neither the name of the Willow Garage nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
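+#
+# Usage sketch (illustrative, assuming the topic remappings documented in
+# mainpage.dox): a monocular calibration with an 8x6, 0.108 m checkerboard
+# might be started as
+#
+#   rosrun camera_calibration cameracalibrator.py --size 8x6 --square 0.108 \
+#       image:=/camera/image_raw camera:=/camera
+#
+# --size and --square are the chessboard options parsed below; image:= and
+# camera:= select the raw image topic and the namespace whose
+# set_camera_info service receives the result.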
+ +import cv2 +import functools +import message_filters +import os +import rospy +from camera_calibration.camera_calibrator import OpenCVCalibrationNode +from camera_calibration.calibrator import ChessboardInfo, Patterns +from message_filters import ApproximateTimeSynchronizer + + +def main(): + from optparse import OptionParser, OptionGroup + parser = OptionParser("%prog --size SIZE1 --square SQUARE1 [ --size SIZE2 --square SQUARE2 ]", + description=None) + parser.add_option("-c", "--camera_name", + type="string", default='narrow_stereo', + help="name of the camera to appear in the calibration file") + group = OptionGroup(parser, "Chessboard Options", + "You must specify one or more chessboards as pairs of --size and --square options.") + group.add_option("-p", "--pattern", + type="string", default="chessboard", + help="calibration pattern to detect - 'chessboard', 'circles', 'acircles'") + group.add_option("-s", "--size", + action="append", default=[], + help="chessboard size as NxM, counting interior corners (e.g. a standard chessboard is 7x7)") + group.add_option("-q", "--square", + action="append", default=[], + help="chessboard square size in meters") + parser.add_option_group(group) + group = OptionGroup(parser, "ROS Communication Options") + group.add_option("--approximate", + type="float", default=0.0, + help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras") + group.add_option("--no-service-check", + action="store_false", dest="service_check", default=True, + help="disable check for set_camera_info services at startup") + parser.add_option_group(group) + group = OptionGroup(parser, "Calibration Optimizer Options") + group.add_option("--fix-principal-point", + action="store_true", default=False, + help="fix the principal point at the image center") + group.add_option("--fix-aspect-ratio", + action="store_true", default=False, + help="enforce focal lengths (fx, fy) are equal") + group.add_option("--zero-tangent-dist", + action="store_true", default=False, + help="set tangential distortion coefficients (p1, p2) to zero") + group.add_option("-k", "--k-coefficients", + type="int", default=2, metavar="NUM_COEFFS", + help="number of radial distortion coefficients to use (up to 6, default %default)") + group.add_option("--disable_calib_cb_fast_check", action='store_true', default=False, + help="uses the CALIB_CB_FAST_CHECK flag for findChessboardCorners") + parser.add_option_group(group) + options, args = parser.parse_args() + + if len(options.size) != len(options.square): + parser.error("Number of size and square inputs must be the same!") + + if not options.square: + options.square.append("0.108") + options.size.append("8x6") + + boards = [] + for (sz, sq) in zip(options.size, options.square): + size = tuple([int(c) for c in sz.split('x')]) + boards.append(ChessboardInfo(size[0], size[1], float(sq))) + + if options.approximate == 0.0: + sync = message_filters.TimeSynchronizer + else: + sync = functools.partial(ApproximateTimeSynchronizer, slop=options.approximate) + + num_ks = options.k_coefficients + + calib_flags = 0 + if options.fix_principal_point: + calib_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT + if options.fix_aspect_ratio: + calib_flags |= cv2.CALIB_FIX_ASPECT_RATIO + if options.zero_tangent_dist: + calib_flags |= cv2.CALIB_ZERO_TANGENT_DIST + if (num_ks > 3): + calib_flags |= cv2.CALIB_RATIONAL_MODEL + if (num_ks < 6): + calib_flags |= cv2.CALIB_FIX_K6 + if (num_ks < 5): + calib_flags |= cv2.CALIB_FIX_K5 + if (num_ks < 4): + calib_flags 
|= cv2.CALIB_FIX_K4 + if (num_ks < 3): + calib_flags |= cv2.CALIB_FIX_K3 + if (num_ks < 2): + calib_flags |= cv2.CALIB_FIX_K2 + if (num_ks < 1): + calib_flags |= cv2.CALIB_FIX_K1 + + pattern = Patterns.Chessboard + if options.pattern == 'circles': + pattern = Patterns.Circles + elif options.pattern == 'acircles': + pattern = Patterns.ACircles + elif options.pattern != 'chessboard': + print('Unrecognized pattern %s, defaulting to chessboard' % options.pattern) + + if options.disable_calib_cb_fast_check: + checkerboard_flags = 0 + else: + checkerboard_flags = cv2.CALIB_CB_FAST_CHECK + + rospy.init_node('cameracalibrator') + node = OpenCVCalibrationNode(boards, options.service_check, sync, calib_flags, pattern, options.camera_name, + checkerboard_flags=checkerboard_flags) + rospy.spin() + +if __name__ == "__main__": + try: + main() + except Exception as e: + import traceback + traceback.print_exc() diff --git a/future pkg/image_pipeline/camera_calibration/nodes/cameracheck.py b/future pkg/image_pipeline/camera_calibration/nodes/cameracheck.py new file mode 100755 index 0000000..6af00d6 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/nodes/cameracheck.py @@ -0,0 +1,57 @@ +#!/usr/bin/python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
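+#
+# Usage sketch (illustrative), following the invocation shown in the
+# changelog above:
+#
+#   rosrun camera_calibration cameracheck.py --size 8x6 --square 0.108
+#
+# --size and --square must match the checkerboard in view; the node then
+# reports Linearity and Reprojection RMS error for an already-calibrated
+# camera.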
+ +import rospy +from camera_calibration.camera_checker import CameraCheckerNode + + +def main(): + from optparse import OptionParser + rospy.init_node('cameracheck') + parser = OptionParser() + parser.add_option("-s", "--size", default="8x6", help="specify chessboard size as nxm [default: %default]") + parser.add_option("-q", "--square", default=".108", help="specify chessboard square size in meters [default: %default]") + parser.add_option("--approximate", + type="float", default=0.0, + help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras") + + options, args = parser.parse_args() + size = tuple([int(c) for c in options.size.split('x')]) + dim = float(options.square) + approximate = float(options.approximate) + CameraCheckerNode(size, dim, approximate) + rospy.spin() + +if __name__ == "__main__": + main() diff --git a/future pkg/image_pipeline/camera_calibration/package.xml b/future pkg/image_pipeline/camera_calibration/package.xml new file mode 100644 index 0000000..7946de4 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/package.xml @@ -0,0 +1,30 @@ + + camera_calibration + 1.12.23 + + camera_calibration allows easy calibration of monocular or stereo + cameras using a checkerboard calibration target. + + James Bowman + Patrick Mihelich + Vincent Rabaud + + catkin + + rostest + + cv_bridge + image_geometry + message_filters + rospy + std_srvs + sensor_msgs + + BSD + http://www.ros.org/wiki/camera_calibration + + + + + + diff --git a/future pkg/image_pipeline/camera_calibration/rosdoc.yaml b/future pkg/image_pipeline/camera_calibration/rosdoc.yaml new file mode 100644 index 0000000..0136cd7 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/rosdoc.yaml @@ -0,0 +1,4 @@ + - builder: sphinx + name: Python API + output_dir: python + sphinx_root_dir: doc diff --git a/future pkg/image_pipeline/camera_calibration/scripts/tarfile_calibration.py b/future pkg/image_pipeline/camera_calibration/scripts/tarfile_calibration.py new file mode 100755 index 0000000..5d552ac --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/scripts/tarfile_calibration.py @@ -0,0 +1,235 @@ +#!/usr/bin/env python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import os +import sys +import numpy + +import cv2 +import cv_bridge +import tarfile + +from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, CalibrationException, ChessboardInfo + +import rospy +import sensor_msgs.srv + +def waitkey(): + k = cv2.waitKey(6) + return k + +def display(win_name, img): + cv2.namedWindow(win_name, cv2.WINDOW_NORMAL) + cv2.imshow( win_name, numpy.asarray( img[:,:] )) + k=-1 + while k ==-1: + k=waitkey() + cv2.destroyWindow(win_name) + if k in [27, ord('q')]: + rospy.signal_shutdown('Quit') + + +def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags = 0, visualize = False, alpha=1.0): + if mono: + calibrator = MonoCalibrator(boards, calib_flags) + else: + calibrator = StereoCalibrator(boards, calib_flags) + + calibrator.do_tarfile_calibration(tarname) + + print(calibrator.ost()) + + if upload: + info = calibrator.as_message() + if mono: + set_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("camera"), sensor_msgs.srv.SetCameraInfo) + + response = set_camera_info_service(info) + if not response.success: + raise RuntimeError("connected to set_camera_info service, but failed setting camera_info") + else: + set_left_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("left_camera"), sensor_msgs.srv.SetCameraInfo) + set_right_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("right_camera"), sensor_msgs.srv.SetCameraInfo) + + response1 = set_left_camera_info_service(info[0]) + response2 = set_right_camera_info_service(info[1]) + if not (response1.success and response2.success): + raise RuntimeError("connected to set_camera_info service, but failed setting camera_info") + + if visualize: + + #Show rectified images + calibrator.set_alpha(alpha) + + archive = tarfile.open(tarname, 'r') + if mono: + for f in archive.getnames(): + if f.startswith('left') and (f.endswith('.pgm') or f.endswith('png')): + filedata = archive.extractfile(f).read() + file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) + im=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) + + bridge = cv_bridge.CvBridge() + try: + msg=bridge.cv2_to_imgmsg(im, "bgr8") + except cv_bridge.CvBridgeError as e: + print(e) + + #handle msg returns the recitifed image with corner detection once camera is calibrated. + drawable=calibrator.handle_msg(msg) + vis=numpy.asarray( drawable.scrib[:,:]) + #Display. Name of window:f + display(f, vis) + else: + limages = [ f for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ] + limages.sort() + rimages = [ f for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ] + rimages.sort() + + if not len(limages) == len(rimages): + raise RuntimeError("Left, right images don't match. 
%d left images, %d right" % (len(limages), len(rimages))) + + for i in range(len(limages)): + l=limages[i] + r=rimages[i] + + if l.startswith('left') and (l.endswith('.pgm') or l.endswith('png')) and r.startswith('right') and (r.endswith('.pgm') or r.endswith('png')): + # LEFT IMAGE + filedata = archive.extractfile(l).read() + file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) + im_left=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) + + bridge = cv_bridge.CvBridge() + try: + msg_left=bridge.cv2_to_imgmsg(im_left, "bgr8") + except cv_bridge.CvBridgeError as e: + print(e) + + #RIGHT IMAGE + filedata = archive.extractfile(r).read() + file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) + im_right=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) + try: + msg_right=bridge.cv2_to_imgmsg(im_right, "bgr8") + except cv_bridge.CvBridgeError as e: + print(e) + + drawable=calibrator.handle_msg([ msg_left,msg_right] ) + + h, w = numpy.asarray(drawable.lscrib[:,:]).shape[:2] + vis = numpy.zeros((h, w*2, 3), numpy.uint8) + vis[:h, :w ,:] = numpy.asarray(drawable.lscrib[:,:]) + vis[:h, w:w*2, :] = numpy.asarray(drawable.rscrib[:,:]) + + display(l+" "+r,vis) + + +if __name__ == '__main__': + from optparse import OptionParser + parser = OptionParser("%prog TARFILE [ opts ]") + parser.add_option("--mono", default=False, action="store_true", dest="mono", + help="Monocular calibration only. Calibrates left images.") + parser.add_option("-s", "--size", default=[], action="append", dest="size", + help="specify chessboard size as NxM [default: 8x6]") + parser.add_option("-q", "--square", default=[], action="append", dest="square", + help="specify chessboard square size in meters [default: 0.108]") + parser.add_option("--upload", default=False, action="store_true", dest="upload", + help="Upload results to camera(s).") + parser.add_option("--fix-principal-point", action="store_true", default=False, + help="fix the principal point at the image center") + parser.add_option("--fix-aspect-ratio", action="store_true", default=False, + help="enforce focal lengths (fx, fy) are equal") + parser.add_option("--zero-tangent-dist", action="store_true", default=False, + help="set tangential distortion coefficients (p1, p2) to zero") + parser.add_option("-k", "--k-coefficients", type="int", default=2, metavar="NUM_COEFFS", + help="number of radial distortion coefficients to use (up to 6, default %default)") + parser.add_option("--visualize", action="store_true", default=False, + help="visualize rectified images after calibration") + parser.add_option("-a", "--alpha", type="float", default=1.0, metavar="ALPHA", + help="zoom for visualization of rectifies images. Ranges from 0 (zoomed in, all pixels in calibrated image are valid) to 1 (zoomed out, all pixels in original image are in calibrated image). default %default)") + + options, args = parser.parse_args() + + if len(options.size) != len(options.square): + parser.error("Number of size and square inputs must be the same!") + + if not options.square: + options.square.append("0.108") + options.size.append("8x6") + + boards = [] + for (sz, sq) in zip(options.size, options.square): + info = ChessboardInfo() + info.dim = float(sq) + size = tuple([int(c) for c in sz.split('x')]) + info.n_cols = size[0] + info.n_rows = size[1] + + boards.append(info) + + if not boards: + parser.error("Must supply at least one chessboard") + + if not args: + parser.error("Must give tarfile name") + if not os.path.exists(args[0]): + parser.error("Tarfile %s does not exist. 
Please select valid tarfile" % args[0]) + + tarname = args[0] + + num_ks = options.k_coefficients + + calib_flags = 0 + if options.fix_principal_point: + calib_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT + if options.fix_aspect_ratio: + calib_flags |= cv2.CALIB_FIX_ASPECT_RATIO + if options.zero_tangent_dist: + calib_flags |= cv2.CALIB_ZERO_TANGENT_DIST + if (num_ks > 3): + calib_flags |= cv2.CALIB_RATIONAL_MODEL + if (num_ks < 6): + calib_flags |= cv2.CALIB_FIX_K6 + if (num_ks < 5): + calib_flags |= cv2.CALIB_FIX_K5 + if (num_ks < 4): + calib_flags |= cv2.CALIB_FIX_K4 + if (num_ks < 3): + calib_flags |= cv2.CALIB_FIX_K3 + if (num_ks < 2): + calib_flags |= cv2.CALIB_FIX_K2 + if (num_ks < 1): + calib_flags |= cv2.CALIB_FIX_K1 + + cal_from_tarfile(boards, tarname, options.mono, options.upload, calib_flags, options.visualize, options.alpha) diff --git a/future pkg/image_pipeline/camera_calibration/setup.py b/future pkg/image_pipeline/camera_calibration/setup.py new file mode 100644 index 0000000..fd51af2 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/setup.py @@ -0,0 +1,9 @@ +#!/usr/bin/env python +from distutils.core import setup +from catkin_pkg.python_setup import generate_distutils_setup + +d = generate_distutils_setup() +d['packages'] = ['camera_calibration'] +d['package_dir'] = {'':'src'} + +setup(**d) diff --git a/future pkg/image_pipeline/camera_calibration/src/camera_calibration/__init__.py b/future pkg/image_pipeline/camera_calibration/src/camera_calibration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/future pkg/image_pipeline/camera_calibration/src/camera_calibration/calibrator.py b/future pkg/image_pipeline/camera_calibration/src/camera_calibration/calibrator.py new file mode 100644 index 0000000..4d68f9d --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/src/camera_calibration/calibrator.py @@ -0,0 +1,1150 @@ +#!/usr/bin/env python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
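cameracalibrator.py and tarfile_calibration.py above build the same set of OpenCV distortion flags from the --k-coefficients count. A minimal standalone sketch of that mapping (the helper name is hypothetical, not part of the package):

import cv2

def radial_distortion_flags(num_ks):
    """Flags that fix every radial coefficient beyond the first num_ks.

    More than three coefficients switches calibrateCamera to the rational
    (k1..k6) model; every unused k_i is then pinned to zero via CALIB_FIX_Ki.
    """
    flags = 0
    if num_ks > 3:
        flags |= cv2.CALIB_RATIONAL_MODEL
    fix = [cv2.CALIB_FIX_K1, cv2.CALIB_FIX_K2, cv2.CALIB_FIX_K3,
           cv2.CALIB_FIX_K4, cv2.CALIB_FIX_K5, cv2.CALIB_FIX_K6]
    for i in range(num_ks, 6):
        flags |= fix[i]
    return flags

For example, radial_distortion_flags(2) leaves k1 and k2 free and fixes k3..k6, which matches the default behaviour of both scripts.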
+ +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO +from io import BytesIO +import cv2 +import cv_bridge +import image_geometry +import math +import numpy.linalg +import pickle +import random +import sensor_msgs.msg +import tarfile +import time +from distutils.version import LooseVersion + + +# Supported calibration patterns +class Patterns: + Chessboard, Circles, ACircles = list(range(3)) + +class CalibrationException(Exception): + pass + +# TODO: Make pattern per-board? +class ChessboardInfo(object): + def __init__(self, n_cols = 0, n_rows = 0, dim = 0.0): + self.n_cols = n_cols + self.n_rows = n_rows + self.dim = dim + +# Make all private!!!!! +def lmin(seq1, seq2): + """ Pairwise minimum of two sequences """ + return [min(a, b) for (a, b) in zip(seq1, seq2)] + +def lmax(seq1, seq2): + """ Pairwise maximum of two sequences """ + return [max(a, b) for (a, b) in zip(seq1, seq2)] + +def _pdist(p1, p2): + """ + Distance bwt two points. p1 = (x, y), p2 = (x, y) + """ + return math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2)) + +def _get_outside_corners(corners, board): + """ + Return the four corners of the board as a whole, as (up_left, up_right, down_right, down_left). + """ + xdim = board.n_cols + ydim = board.n_rows + + if corners.shape[1] * corners.shape[0] != xdim * ydim: + raise Exception("Invalid number of corners! %d corners. X: %d, Y: %d" % (corners.shape[1] * corners.shape[0], + xdim, ydim)) + + up_left = corners[0,0] + up_right = corners[xdim - 1,0] + down_right = corners[-1,0] + down_left = corners[-xdim,0] + + return (up_left, up_right, down_right, down_left) + +def _get_skew(corners, board): + """ + Get skew for given checkerboard detection. + Scaled to [0,1], which 0 = no skew, 1 = high skew + Skew is proportional to the divergence of three outside corners from 90 degrees. + """ + # TODO Using three nearby interior corners might be more robust, outside corners occasionally + # get mis-detected + up_left, up_right, down_right, _ = _get_outside_corners(corners, board) + + def angle(a, b, c): + """ + Return angle between lines ab, bc + """ + ab = a - b + cb = c - b + return math.acos(numpy.dot(ab,cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb))) + + skew = min(1.0, 2. * abs((math.pi / 2.) - angle(up_left, up_right, down_right))) + return skew + +def _get_area(corners, board): + """ + Get 2d image area of the detected checkerboard. + The projected checkerboard is assumed to be a convex quadrilateral, and the area computed as + |p X q|/2; see http://mathworld.wolfram.com/Quadrilateral.html. + """ + (up_left, up_right, down_right, down_left) = _get_outside_corners(corners, board) + a = up_right - up_left + b = down_right - up_right + c = down_left - down_right + p = b + c + q = a + b + return abs(p[0]*q[1] - p[1]*q[0]) / 2. 
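_get_area computes the quadrilateral area from the two diagonals as |p x q| / 2. A quick, hedged sanity check of that identity with a unit square (corner ordering matches _get_outside_corners):

import numpy

up_left    = numpy.array([0., 0.])
up_right   = numpy.array([1., 0.])
down_right = numpy.array([1., 1.])
down_left  = numpy.array([0., 1.])

a = up_right - up_left
b = down_right - up_right
c = down_left - down_right
p = b + c                      # one diagonal of the quadrilateral
q = a + b                      # the other diagonal
area = abs(p[0] * q[1] - p[1] * q[0]) / 2.
print(area)                    # -> 1.0, the area of the unit square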
+ +def _get_corners(img, board, refine = True, checkerboard_flags=0): + """ + Get corners for a particular chessboard for an image + """ + h = img.shape[0] + w = img.shape[1] + if len(img.shape) == 3 and img.shape[2] == 3: + mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + else: + mono = img + (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH | + cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags) + if not ok: + return (ok, corners) + + # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false + # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction + # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras + BORDER = 8 + if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]): + ok = False + + # Ensure that all corner-arrays are going from top to bottom. + if board.n_rows!=board.n_cols: + if corners[0, 0, 1] > corners[-1, 0, 1]: + corners = numpy.copy(numpy.flipud(corners)) + else: + direction_corners=(corners[-1]-corners[0])>=numpy.array([[0.0,0.0]]) + + if not numpy.all(direction_corners): + if not numpy.any(direction_corners): + corners = numpy.copy(numpy.flipud(corners)) + elif direction_corners[0][0]: + corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2)).reshape(board.n_cols*board.n_rows,1,2) + else: + corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2),3).reshape(board.n_cols*board.n_rows,1,2) + + if refine and ok: + # Use a radius of half the minimum distance between corners. This should be large enough to snap to the + # correct corner, but not so large as to include a wrong corner in the search window. + min_distance = float("inf") + for row in range(board.n_rows): + for col in range(board.n_cols - 1): + index = row*board.n_rows + col + min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0])) + for row in range(board.n_rows - 1): + for col in range(board.n_cols): + index = row*board.n_rows + col + min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0])) + radius = int(math.ceil(min_distance * 0.5)) + cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1), + ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 )) + + return (ok, corners) + +def _get_circles(img, board, pattern): + """ + Get circle centers for a symmetric or asymmetric grid + """ + h = img.shape[0] + w = img.shape[1] + if len(img.shape) == 3 and img.shape[2] == 3: + mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + else: + mono = img + + flag = cv2.CALIB_CB_SYMMETRIC_GRID + if pattern == Patterns.ACircles: + flag = cv2.CALIB_CB_ASYMMETRIC_GRID + mono_arr = numpy.array(mono) + (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag) + + # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try + # again with dimensions swapped - not so efficient. + # TODO Better to add as second board? Corner ordering will change. 
+ if not ok and pattern == Patterns.Circles: + (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag) + + return (ok, corners) + + +# TODO self.size needs to come from CameraInfo, full resolution +class Calibrator(object): + """ + Base class for calibration system + """ + def __init__(self, boards, flags=0, pattern=Patterns.Chessboard, name='', checkerboard_flags=cv2.CALIB_CB_FAST_CHECK): + # Ordering the dimensions for the different detectors is actually a minefield... + if pattern == Patterns.Chessboard: + # Make sure n_cols > n_rows to agree with OpenCV CB detector output + self._boards = [ChessboardInfo(max(i.n_cols, i.n_rows), min(i.n_cols, i.n_rows), i.dim) for i in boards] + elif pattern == Patterns.ACircles: + # 7x4 and 4x7 are actually different patterns. Assume square-ish pattern, so n_rows > n_cols. + self._boards = [ChessboardInfo(min(i.n_cols, i.n_rows), max(i.n_cols, i.n_rows), i.dim) for i in boards] + elif pattern == Patterns.Circles: + # We end up having to check both ways anyway + self._boards = boards + + # Set to true after we perform calibration + self.calibrated = False + self.calib_flags = flags + self.checkerboard_flags = checkerboard_flags + self.pattern = pattern + self.br = cv_bridge.CvBridge() + + # self.db is list of (parameters, image) samples for use in calibration. parameters has form + # (X, Y, size, skew) all normalized to [0,1], to keep track of what sort of samples we've taken + # and ensure enough variety. + self.db = [] + # For each db sample, we also record the detected corners. + self.good_corners = [] + # Set to true when we have sufficiently varied samples to calibrate + self.goodenough = False + self.param_ranges = [0.7, 0.7, 0.4, 0.5] + self.name = name + + def mkgray(self, msg): + """ + Convert a message into a 8-bit 1 channel monochrome OpenCV image + """ + # as cv_bridge automatically scales, we need to remove that behavior + # TODO: get a Python API in cv_bridge to check for the image depth. + if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']: + mono16 = self.br.imgmsg_to_cv2(msg, '16UC1') + mono8 = numpy.array(numpy.clip(mono16, 0, 255), dtype=numpy.uint8) + return mono8 + elif 'FC1' in msg.encoding: + # floating point image handling + img = self.br.imgmsg_to_cv2(msg, "passthrough") + _, max_val, _, _ = cv2.minMaxLoc(img) + if max_val > 0: + scale = 255.0 / max_val + mono_img = (img * scale).astype(numpy.uint8) + else: + mono_img = img.astype(numpy.uint8) + return mono_img + else: + return self.br.imgmsg_to_cv2(msg, "mono8") + + def get_parameters(self, corners, board, size): + """ + Return list of parameters [X, Y, size, skew] describing the checkerboard view. + """ + (width, height) = size + Xs = corners[:,:,0] + Ys = corners[:,:,1] + area = _get_area(corners, board) + border = math.sqrt(area) + # For X and Y, we "shrink" the image all around by approx. half the board size. + # Otherwise large boards are penalized because you can't get much X/Y variation. + p_x = min(1.0, max(0.0, (numpy.mean(Xs) - border / 2) / (width - border))) + p_y = min(1.0, max(0.0, (numpy.mean(Ys) - border / 2) / (height - border))) + p_size = math.sqrt(area / (width * height)) + skew = _get_skew(corners, board) + params = [p_x, p_y, p_size, skew] + return params + + def is_good_sample(self, params): + """ + Returns true if the checkerboard detection described by params should be added to the database. 
+ """ + if not self.db: + return True + + def param_distance(p1, p2): + return sum([abs(a-b) for (a,b) in zip(p1, p2)]) + + db_params = [sample[0] for sample in self.db] + d = min([param_distance(params, p) for p in db_params]) + #print "d = %.3f" % d #DEBUG + # TODO What's a good threshold here? Should it be configurable? + return d > 0.2 + + _param_names = ["X", "Y", "Size", "Skew"] + + def compute_goodenough(self): + if not self.db: + return None + + # Find range of checkerboard poses covered by samples in database + all_params = [sample[0] for sample in self.db] + min_params = all_params[0] + max_params = all_params[0] + for params in all_params[1:]: + min_params = lmin(min_params, params) + max_params = lmax(max_params, params) + # Don't reward small size or skew + min_params = [min_params[0], min_params[1], 0., 0.] + + # For each parameter, judge how much progress has been made toward adequate variation + progress = [min((hi - lo) / r, 1.0) for (lo, hi, r) in zip(min_params, max_params, self.param_ranges)] + # If we have lots of samples, allow calibration even if not all parameters are green + # TODO Awkward that we update self.goodenough instead of returning it + self.goodenough = (len(self.db) >= 40) or all([p == 1.0 for p in progress]) + + return list(zip(self._param_names, min_params, max_params, progress)) + + def mk_object_points(self, boards, use_board_size = False): + opts = [] + for i, b in enumerate(boards): + num_pts = b.n_cols * b.n_rows + opts_loc = numpy.zeros((num_pts, 1, 3), numpy.float32) + for j in range(num_pts): + opts_loc[j, 0, 0] = (j / b.n_cols) + if self.pattern == Patterns.ACircles: + opts_loc[j, 0, 1] = 2*(j % b.n_cols) + (opts_loc[j, 0, 0] % 2) + else: + opts_loc[j, 0, 1] = (j % b.n_cols) + opts_loc[j, 0, 2] = 0 + if use_board_size: + opts_loc[j, 0, :] = opts_loc[j, 0, :] * b.dim + opts.append(opts_loc) + return opts + + def get_corners(self, img, refine = True): + """ + Use cvFindChessboardCorners to find corners of chessboard in image. + + Check all boards. Return corners for first chessboard that it detects + if given multiple size chessboards. + + Returns (ok, corners, board) + """ + + for b in self._boards: + if self.pattern == Patterns.Chessboard: + (ok, corners) = _get_corners(img, b, refine, self.checkerboard_flags) + else: + (ok, corners) = _get_circles(img, b, self.pattern) + if ok: + return (ok, corners, b) + return (False, None, None) + + def downsample_and_detect(self, img): + """ + Downsample the input image to approximately VGA resolution and detect the + calibration target corners in the full-size image. + + Combines these apparently orthogonal duties as an optimization. Checkerboard + detection is too expensive on large images, so it's better to do detection on + the smaller display image and scale the corners back up to the correct size. + + Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)). + """ + # Scale the input image down to ~VGA size + height = img.shape[0] + width = img.shape[1] + scale = math.sqrt( (width*height) / (640.*480.) 
) + if scale > 1.0: + scrib = cv2.resize(img, (int(width / scale), int(height / scale))) + else: + scrib = img + # Due to rounding, actual horizontal/vertical scaling may differ slightly + x_scale = float(width) / scrib.shape[1] + y_scale = float(height) / scrib.shape[0] + + if self.pattern == Patterns.Chessboard: + # Detect checkerboard + (ok, downsampled_corners, board) = self.get_corners(scrib, refine = True) + + # Scale corners back to full size image + corners = None + if ok: + if scale > 1.0: + # Refine up-scaled corners in the original full-res image + # TODO Does this really make a difference in practice? + corners_unrefined = downsampled_corners.copy() + corners_unrefined[:, :, 0] *= x_scale + corners_unrefined[:, :, 1] *= y_scale + radius = int(math.ceil(scale)) + if len(img.shape) == 3 and img.shape[2] == 3: + mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + else: + mono = img + cv2.cornerSubPix(mono, corners_unrefined, (radius,radius), (-1,-1), + ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 )) + corners = corners_unrefined + else: + corners = downsampled_corners + else: + # Circle grid detection is fast even on large images + (ok, corners, board) = self.get_corners(img) + # Scale corners to downsampled image for display + downsampled_corners = None + if ok: + if scale > 1.0: + downsampled_corners = corners.copy() + downsampled_corners[:,:,0] /= x_scale + downsampled_corners[:,:,1] /= y_scale + else: + downsampled_corners = corners + + return (scrib, corners, downsampled_corners, board, (x_scale, y_scale)) + + + def lrmsg(self, d, k, r, p): + """ Used by :meth:`as_message`. Return a CameraInfo message for the given calibration matrices """ + msg = sensor_msgs.msg.CameraInfo() + (msg.width, msg.height) = self.size + if d.size > 5: + msg.distortion_model = "rational_polynomial" + else: + msg.distortion_model = "plumb_bob" + msg.D = numpy.ravel(d).copy().tolist() + msg.K = numpy.ravel(k).copy().tolist() + msg.R = numpy.ravel(r).copy().tolist() + msg.P = numpy.ravel(p).copy().tolist() + return msg + + def lrreport(self, d, k, r, p): + print("D = ", numpy.ravel(d).tolist()) + print("K = ", numpy.ravel(k).tolist()) + print("R = ", numpy.ravel(r).tolist()) + print("P = ", numpy.ravel(p).tolist()) + + def lrost(self, name, d, k, r, p): + calmessage = ( + "# oST version 5.0 parameters\n" + + "\n" + + "\n" + + "[image]\n" + + "\n" + + "width\n" + + str(self.size[0]) + "\n" + + "\n" + + "height\n" + + str(self.size[1]) + "\n" + + "\n" + + "[%s]" % name + "\n" + + "\n" + + "camera matrix\n" + + " ".join(["%8f" % k[0,i] for i in range(3)]) + "\n" + + " ".join(["%8f" % k[1,i] for i in range(3)]) + "\n" + + " ".join(["%8f" % k[2,i] for i in range(3)]) + "\n" + + "\n" + + "distortion\n" + + " ".join(["%8f" % d[i,0] for i in range(d.shape[0])]) + "\n" + + "\n" + + "rectification\n" + + " ".join(["%8f" % r[0,i] for i in range(3)]) + "\n" + + " ".join(["%8f" % r[1,i] for i in range(3)]) + "\n" + + " ".join(["%8f" % r[2,i] for i in range(3)]) + "\n" + + "\n" + + "projection\n" + + " ".join(["%8f" % p[0,i] for i in range(4)]) + "\n" + + " ".join(["%8f" % p[1,i] for i in range(4)]) + "\n" + + " ".join(["%8f" % p[2,i] for i in range(4)]) + "\n" + + "\n") + assert len(calmessage) < 525, "Calibration info must be less than 525 bytes" + return calmessage + + def lryaml(self, name, d, k, r, p): + calmessage = ("" + + "image_width: " + str(self.size[0]) + "\n" + + "image_height: " + str(self.size[1]) + "\n" + + "camera_name: " + name + "\n" + + "camera_matrix:\n" + + " rows: 3\n" + + " cols: 
3\n" + + " data: [" + ", ".join(["%8f" % i for i in k.reshape(1,9)[0]]) + "]\n" + + "distortion_model: " + ("rational_polynomial" if d.size > 5 else "plumb_bob") + "\n" + + "distortion_coefficients:\n" + + " rows: 1\n" + + " cols: 5\n" + + " data: [" + ", ".join(["%8f" % d[i,0] for i in range(d.shape[0])]) + "]\n" + + "rectification_matrix:\n" + + " rows: 3\n" + + " cols: 3\n" + + " data: [" + ", ".join(["%8f" % i for i in r.reshape(1,9)[0]]) + "]\n" + + "projection_matrix:\n" + + " rows: 3\n" + + " cols: 4\n" + + " data: [" + ", ".join(["%8f" % i for i in p.reshape(1,12)[0]]) + "]\n" + + "") + return calmessage + + def do_save(self): + filename = '/tmp/calibrationdata.tar.gz' + tf = tarfile.open(filename, 'w:gz') + self.do_tarfile_save(tf) # Must be overridden in subclasses + tf.close() + print(("Wrote calibration data to", filename)) + +def image_from_archive(archive, name): + """ + Load image PGM file from tar archive. + + Used for tarfile loading and unit test. + """ + member = archive.getmember(name) + imagefiledata = numpy.fromstring(archive.extractfile(member).read(), numpy.uint8) + imagefiledata.resize((1, imagefiledata.size)) + return cv2.imdecode(imagefiledata, cv2.IMREAD_COLOR) + +class ImageDrawable(object): + """ + Passed to CalibrationNode after image handled. Allows plotting of images + with detected corner points + """ + def __init__(self): + self.params = None + +class MonoDrawable(ImageDrawable): + def __init__(self): + ImageDrawable.__init__(self) + self.scrib = None + self.linear_error = -1.0 + + +class StereoDrawable(ImageDrawable): + def __init__(self): + ImageDrawable.__init__(self) + self.lscrib = None + self.rscrib = None + self.epierror = -1 + self.dim = -1 + + +class MonoCalibrator(Calibrator): + """ + Calibration class for monocular cameras:: + + images = [cv2.imread("mono%d.png") for i in range(8)] + mc = MonoCalibrator() + mc.cal(images) + print mc.as_message() + """ + + is_mono = True # TODO Could get rid of is_mono + + def __init__(self, *args, **kwargs): + if 'name' not in kwargs: + kwargs['name'] = 'narrow_stereo/left' + super(MonoCalibrator, self).__init__(*args, **kwargs) + + def cal(self, images): + """ + Calibrate camera from given images + """ + goodcorners = self.collect_corners(images) + self.cal_fromcorners(goodcorners) + self.calibrated = True + + def collect_corners(self, images): + """ + :param images: source images containing chessboards + :type images: list of :class:`cvMat` + + Find chessboards in all images. 
+ + Return [ (corners, ChessboardInfo) ] + """ + self.size = (images[0].shape[1], images[0].shape[0]) + corners = [self.get_corners(i) for i in images] + + goodcorners = [(co, b) for (ok, co, b) in corners if ok] + if not goodcorners: + raise CalibrationException("No corners found in images!") + return goodcorners + + def cal_fromcorners(self, good): + """ + :param good: Good corner positions and boards + :type good: [(corners, ChessboardInfo)] + + + """ + boards = [ b for (_, b) in good ] + + ipts = [ points for (points, _) in good ] + opts = self.mk_object_points(boards) + + self.intrinsics = numpy.zeros((3, 3), numpy.float64) + if self.calib_flags & cv2.CALIB_RATIONAL_MODEL: + self.distortion = numpy.zeros((8, 1), numpy.float64) # rational polynomial + else: + self.distortion = numpy.zeros((5, 1), numpy.float64) # plumb bob + # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio + self.intrinsics[0,0] = 1.0 + self.intrinsics[1,1] = 1.0 + cv2.calibrateCamera( + opts, ipts, + self.size, self.intrinsics, + self.distortion, + flags = self.calib_flags) + + # R is identity matrix for monocular calibration + self.R = numpy.eye(3, dtype=numpy.float64) + self.P = numpy.zeros((3, 4), dtype=numpy.float64) + + self.set_alpha(0.0) + + def set_alpha(self, a): + """ + Set the alpha value for the calibrated camera solution. The alpha + value is a zoom, and ranges from 0 (zoomed in, all pixels in + calibrated image are valid) to 1 (zoomed out, all pixels in + original image are in calibrated image). + """ + + # NOTE: Prior to Electric, this code was broken such that we never actually saved the new + # camera matrix. In effect, this enforced P = [K|0] for monocular cameras. + # TODO: Verify that OpenCV #1199 gets applied (improved GetOptimalNewCameraMatrix) + ncm, _ = cv2.getOptimalNewCameraMatrix(self.intrinsics, self.distortion, self.size, a) + for j in range(3): + for i in range(3): + self.P[j,i] = ncm[j, i] + self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, ncm, self.size, cv2.CV_32FC1) + + def remap(self, src): + """ + :param src: source image + :type src: :class:`cvMat` + + Apply the post-calibration undistortion to the source image + """ + return cv2.remap(src, self.mapx, self.mapy, cv2.INTER_LINEAR) + + def undistort_points(self, src): + """ + :param src: N source pixel points (u,v) as an Nx2 matrix + :type src: :class:`cvMat` + + Apply the post-calibration undistortion to the source points + """ + + return cv2.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P) + + def as_message(self): + """ Return the camera calibration as a CameraInfo message """ + return self.lrmsg(self.distortion, self.intrinsics, self.R, self.P) + + def from_message(self, msg, alpha = 0.0): + """ Initialize the camera calibration from a CameraInfo message """ + + self.size = (msg.width, msg.height) + self.intrinsics = numpy.array(msg.K, dtype=numpy.float64, copy=True).reshape((3, 3)) + self.distortion = numpy.array(msg.D, dtype=numpy.float64, copy=True).reshape((len(msg.D), 1)) + self.R = numpy.array(msg.R, dtype=numpy.float64, copy=True).reshape((3, 3)) + self.P = numpy.array(msg.P, dtype=numpy.float64, copy=True).reshape((3, 4)) + + self.set_alpha(0.0) + + def report(self): + self.lrreport(self.distortion, self.intrinsics, self.R, self.P) + + def ost(self): + return self.lrost(self.name, self.distortion, self.intrinsics, self.R, self.P) + + def yaml(self): + return self.lryaml(self.name, self.distortion, self.intrinsics, self.R, self.P) 
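The MonoCalibrator docstring above sketches offline use; slightly expanded here as a hedged example (file names and board dimensions are illustrative, assuming eight 8x6 chessboard images with 0.108 m squares on disk):

import cv2
from camera_calibration.calibrator import ChessboardInfo, MonoCalibrator

board = ChessboardInfo(8, 6, 0.108)                          # interior corners and square size
images = [cv2.imread("mono%d.png" % i) for i in range(8)]    # hypothetical capture set
mc = MonoCalibrator([board])
mc.cal(images)              # finds corners in every image and runs cv2.calibrateCamera
mc.report()                 # prints D, K, R, P
print(mc.yaml())            # same YAML text that do_tarfile_save() archives as ost.yaml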
+ + def linear_error_from_image(self, image): + """ + Detect the checkerboard and compute the linear error. + Mainly for use in tests. + """ + _, corners, _, board, _ = self.downsample_and_detect(image) + if corners is None: + return None + + undistorted = self.undistort_points(corners) + return self.linear_error(undistorted, board) + + @staticmethod + def linear_error(corners, b): + + """ + Returns the linear error for a set of corners detected in the unrectified image. + """ + + if corners is None: + return None + + def pt2line(x0, y0, x1, y1, x2, y2): + """ point is (x0, y0), line is (x1, y1, x2, y2) """ + return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) + + cc = b.n_cols + cr = b.n_rows + errors = [] + for r in range(cr): + (x1, y1) = corners[(cc * r) + 0, 0] + (x2, y2) = corners[(cc * r) + cc - 1, 0] + for i in range(1, cc - 1): + (x0, y0) = corners[(cc * r) + i, 0] + errors.append(pt2line(x0, y0, x1, y1, x2, y2)) + if errors: + return math.sqrt(sum([e**2 for e in errors]) / len(errors)) + else: + return None + + + def handle_msg(self, msg): + """ + Detects the calibration target and, if found and provides enough new information, + adds it to the sample database. + + Returns a MonoDrawable message with the display image and progress info. + """ + gray = self.mkgray(msg) + linear_error = -1 + + # Get display-image-to-be (scrib) and detection of the calibration target + scrib_mono, corners, downsampled_corners, board, (x_scale, y_scale) = self.downsample_and_detect(gray) + + if self.calibrated: + # Show rectified image + # TODO Pull out downsampling code into function + gray_remap = self.remap(gray) + gray_rect = gray_remap + if x_scale != 1.0 or y_scale != 1.0: + gray_rect = cv2.resize(gray_remap, (scrib_mono.shape[1], scrib_mono.shape[0])) + + scrib = cv2.cvtColor(gray_rect, cv2.COLOR_GRAY2BGR) + + if corners is not None: + # Report linear error + undistorted = self.undistort_points(corners) + linear_error = self.linear_error(undistorted, board) + + # Draw rectified corners + scrib_src = undistorted.copy() + scrib_src[:,:,0] /= x_scale + scrib_src[:,:,1] /= y_scale + cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), scrib_src, True) + + else: + scrib = cv2.cvtColor(scrib_mono, cv2.COLOR_GRAY2BGR) + if corners is not None: + # Draw (potentially downsampled) corners onto display image + cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), downsampled_corners, True) + + # Add sample to database only if it's sufficiently different from any previous sample. + params = self.get_parameters(corners, board, (gray.shape[1], gray.shape[0])) + if self.is_good_sample(params): + self.db.append((params, gray)) + self.good_corners.append((corners, board)) + print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params))) + + rv = MonoDrawable() + rv.scrib = scrib + rv.params = self.compute_goodenough() + rv.linear_error = linear_error + return rv + + def do_calibration(self, dump = False): + if not self.good_corners: + print("**** Collecting corners for all images! 
****") #DEBUG + images = [i for (p, i) in self.db] + self.good_corners = self.collect_corners(images) + # Dump should only occur if user wants it + if dump: + pickle.dump((self.is_mono, self.size, self.good_corners), + open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w")) + self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally + self.cal_fromcorners(self.good_corners) + self.calibrated = True + # DEBUG + print((self.report())) + print((self.ost())) + + def do_tarfile_save(self, tf): + """ Write images and calibration solution to a tarfile object """ + + def taradd(name, buf): + if isinstance(buf, basestring): + s = StringIO(buf) + else: + s = BytesIO(buf) + ti = tarfile.TarInfo(name) + ti.size = len(s.getvalue()) + ti.uname = 'calibrator' + ti.mtime = int(time.time()) + tf.addfile(tarinfo=ti, fileobj=s) + + ims = [("left-%04d.png" % i, im) for i,(_, im) in enumerate(self.db)] + for (name, im) in ims: + taradd(name, cv2.imencode(".png", im)[1].tostring()) + taradd('ost.yaml', self.yaml()) + taradd('ost.txt', self.ost()) + + def do_tarfile_calibration(self, filename): + archive = tarfile.open(filename, 'r') + + limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('.pgm') or f.endswith('png'))) ] + + self.cal(limages) + +# TODO Replicate MonoCalibrator improvements in stereo +class StereoCalibrator(Calibrator): + """ + Calibration class for stereo cameras:: + + limages = [cv2.imread("left%d.png") for i in range(8)] + rimages = [cv2.imread("right%d.png") for i in range(8)] + sc = StereoCalibrator() + sc.cal(limages, rimages) + print sc.as_message() + """ + + is_mono = False + + def __init__(self, *args, **kwargs): + if 'name' not in kwargs: + kwargs['name'] = 'narrow_stereo' + super(StereoCalibrator, self).__init__(*args, **kwargs) + self.l = MonoCalibrator(*args, **kwargs) + self.r = MonoCalibrator(*args, **kwargs) + # Collecting from two cameras in a horizontal stereo rig, can't get + # full X range in the left camera. + self.param_ranges[0] = 0.4 + + def cal(self, limages, rimages): + """ + :param limages: source left images containing chessboards + :type limages: list of :class:`cvMat` + :param rimages: source right images containing chessboards + :type rimages: list of :class:`cvMat` + + Find chessboards in images, and runs the OpenCV calibration solver. + """ + goodcorners = self.collect_corners(limages, rimages) + self.size = (limages[0].shape[1], limages[0].shape[0]) + self.l.size = self.size + self.r.size = self.size + self.cal_fromcorners(goodcorners) + self.calibrated = True + + def collect_corners(self, limages, rimages): + """ + For a sequence of left and right images, find pairs of images where both + left and right have a chessboard, and return their corners as a list of pairs. 
+ """ + # Pick out (corners, board) tuples + lcorners = [ self.downsample_and_detect(i)[1:4:2] for i in limages] + rcorners = [ self.downsample_and_detect(i)[1:4:2] for i in rimages] + good = [(lco, rco, b) for ((lco, b), (rco, br)) in zip( lcorners, rcorners) + if (lco is not None and rco is not None)] + + if len(good) == 0: + raise CalibrationException("No corners found in images!") + return good + + def cal_fromcorners(self, good): + # Perform monocular calibrations + lcorners = [(l, b) for (l, r, b) in good] + rcorners = [(r, b) for (l, r, b) in good] + self.l.cal_fromcorners(lcorners) + self.r.cal_fromcorners(rcorners) + + lipts = [ l for (l, _, _) in good ] + ripts = [ r for (_, r, _) in good ] + boards = [ b for (_, _, b) in good ] + + opts = self.mk_object_points(boards, True) + + flags = cv2.CALIB_FIX_INTRINSIC + + self.T = numpy.zeros((3, 1), dtype=numpy.float64) + self.R = numpy.eye(3, dtype=numpy.float64) + if LooseVersion(cv2.__version__).version[0] == 2: + cv2.stereoCalibrate(opts, lipts, ripts, self.size, + self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.R, # R + self.T, # T + criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), + flags = flags) + else: + cv2.stereoCalibrate(opts, lipts, ripts, + self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.size, + self.R, # R + self.T, # T + criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), + flags = flags) + + self.set_alpha(0.0) + + def set_alpha(self, a): + """ + Set the alpha value for the calibrated camera solution. The + alpha value is a zoom, and ranges from 0 (zoomed in, all pixels + in calibrated image are valid) to 1 (zoomed out, all pixels in + original image are in calibrated image). + """ + + cv2.stereoRectify(self.l.intrinsics, + self.l.distortion, + self.r.intrinsics, + self.r.distortion, + self.size, + self.R, + self.T, + self.l.R, self.r.R, self.l.P, self.r.P, + alpha = a) + + cv2.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.P, self.size, cv2.CV_32FC1, + self.l.mapx, self.l.mapy) + cv2.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.P, self.size, cv2.CV_32FC1, + self.r.mapx, self.r.mapy) + + def as_message(self): + """ + Return the camera calibration as a pair of CameraInfo messages, for left + and right cameras respectively. + """ + + return (self.lrmsg(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P), + self.lrmsg(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P)) + + def from_message(self, msgs, alpha = 0.0): + """ Initialize the camera calibration from a pair of CameraInfo messages. 
""" + self.size = (msgs[0].width, msgs[0].height) + + self.T = numpy.zeros((3, 1), dtype=numpy.float64) + self.R = numpy.eye(3, dtype=numpy.float64) + + self.l.from_message(msgs[0]) + self.r.from_message(msgs[1]) + # Need to compute self.T and self.R here, using the monocular parameters above + if False: + self.set_alpha(0.0) + + def report(self): + print("\nLeft:") + self.lrreport(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P) + print("\nRight:") + self.lrreport(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P) + print("self.T ", numpy.ravel(self.T).tolist()) + print("self.R ", numpy.ravel(self.R).tolist()) + + def ost(self): + return (self.lrost(self.name + "/left", self.l.distortion, self.l.intrinsics, self.l.R, self.l.P) + + self.lrost(self.name + "/right", self.r.distortion, self.r.intrinsics, self.r.R, self.r.P)) + + def yaml(self, suffix, info): + return self.lryaml(self.name + suffix, info.distortion, info.intrinsics, info.R, info.P) + + # TODO Get rid of "from_images" versions of these, instead have function to get undistorted corners + def epipolar_error_from_images(self, limage, rimage): + """ + Detect the checkerboard in both images and compute the epipolar error. + Mainly for use in tests. + """ + lcorners = self.downsample_and_detect(limage)[1] + rcorners = self.downsample_and_detect(rimage)[1] + if lcorners is None or rcorners is None: + return None + + lundistorted = self.l.undistort_points(lcorners) + rundistorted = self.r.undistort_points(rcorners) + + return self.epipolar_error(lundistorted, rundistorted) + + def epipolar_error(self, lcorners, rcorners): + """ + Compute the epipolar error from two sets of matching undistorted points + """ + d = lcorners[:,:,1] - rcorners[:,:,1] + return numpy.sqrt(numpy.square(d).sum() / d.size) + + def chessboard_size_from_images(self, limage, rimage): + _, lcorners, _, board, _ = self.downsample_and_detect(limage) + _, rcorners, _, board, _ = self.downsample_and_detect(rimage) + if lcorners is None or rcorners is None: + return None + + lundistorted = self.l.undistort_points(lcorners) + rundistorted = self.r.undistort_points(rcorners) + + return self.chessboard_size(lundistorted, rundistorted, board) + + def chessboard_size(self, lcorners, rcorners, board, msg = None): + """ + Compute the square edge length from two sets of matching undistorted points + given the current calibration. + :param msg: a tuple of (left_msg, right_msg) + """ + # Project the points to 3d + cam = image_geometry.StereoCameraModel() + if msg == None: + msg = self.as_message() + cam.fromCameraInfo(*msg) + disparities = lcorners[:,:,0] - rcorners[:,:,0] + pt3d = [cam.projectPixelTo3d((lcorners[i,0,0], lcorners[i,0,1]), disparities[i,0]) for i in range(lcorners.shape[0]) ] + def l2(p0, p1): + return math.sqrt(sum([(c0 - c1) ** 2 for (c0, c1) in zip(p0, p1)])) + + # Compute the length from each horizontal and vertical line, and return the mean + cc = board.n_cols + cr = board.n_rows + lengths = ( + [l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1) for r in range(cr)] + + [l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1) for c in range(cc)]) + return sum(lengths) / len(lengths) + + def handle_msg(self, msg): + # TODO Various asserts that images have same dimension, same board detected... 
+ (lmsg, rmsg) = msg + lgray = self.mkgray(lmsg) + rgray = self.mkgray(rmsg) + epierror = -1 + + # Get display-images-to-be and detections of the calibration target + lscrib_mono, lcorners, ldownsampled_corners, lboard, (x_scale, y_scale) = self.downsample_and_detect(lgray) + rscrib_mono, rcorners, rdownsampled_corners, rboard, _ = self.downsample_and_detect(rgray) + + if self.calibrated: + # Show rectified images + lremap = self.l.remap(lgray) + rremap = self.r.remap(rgray) + lrect = lremap + rrect = rremap + if x_scale != 1.0 or y_scale != 1.0: + lrect = cv2.resize(lremap, (lscrib_mono.shape[1], lscrib_mono.shape[0])) + rrect = cv2.resize(rremap, (rscrib_mono.shape[1], rscrib_mono.shape[0])) + + lscrib = cv2.cvtColor(lrect, cv2.COLOR_GRAY2BGR) + rscrib = cv2.cvtColor(rrect, cv2.COLOR_GRAY2BGR) + + # Draw rectified corners + if lcorners is not None: + lundistorted = self.l.undistort_points(lcorners) + scrib_src = lundistorted.copy() + scrib_src[:,:,0] /= x_scale + scrib_src[:,:,1] /= y_scale + cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), scrib_src, True) + + if rcorners is not None: + rundistorted = self.r.undistort_points(rcorners) + scrib_src = rundistorted.copy() + scrib_src[:,:,0] /= x_scale + scrib_src[:,:,1] /= y_scale + cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), scrib_src, True) + + # Report epipolar error + if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners): + epierror = self.epipolar_error(lundistorted, rundistorted) + + else: + lscrib = cv2.cvtColor(lscrib_mono, cv2.COLOR_GRAY2BGR) + rscrib = cv2.cvtColor(rscrib_mono, cv2.COLOR_GRAY2BGR) + # Draw any detected chessboards onto display (downsampled) images + if lcorners is not None: + cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), + ldownsampled_corners, True) + if rcorners is not None: + cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), + rdownsampled_corners, True) + + # Add sample to database only if it's sufficiently different from any previous sample + if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners): + params = self.get_parameters(lcorners, lboard, (lgray.shape[1], lgray.shape[0])) + if self.is_good_sample(params): + self.db.append( (params, lgray, rgray) ) + self.good_corners.append( (lcorners, rcorners, lboard) ) + print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params))) + + rv = StereoDrawable() + rv.lscrib = lscrib + rv.rscrib = rscrib + rv.params = self.compute_goodenough() + rv.epierror = epierror + return rv + + def do_calibration(self, dump = False): + # TODO MonoCalibrator collects corners if needed here + # Dump should only occur if user wants it + if dump: + pickle.dump((self.is_mono, self.size, self.good_corners), + open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w")) + self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally + self.l.size = self.size + self.r.size = self.size + self.cal_fromcorners(self.good_corners) + self.calibrated = True + # DEBUG + print((self.report())) + print((self.ost())) + + def do_tarfile_save(self, tf): + """ Write images and calibration solution to a tarfile object """ + ims = ([("left-%04d.png" % i, im) for i,(_, im, _) in enumerate(self.db)] + + [("right-%04d.png" % i, im) for i,(_, _, im) in enumerate(self.db)]) + + def taradd(name, buf): + if isinstance(buf, basestring): + s = StringIO(buf) + else: + s = BytesIO(buf) 
+ ti = tarfile.TarInfo(name) + ti.size = len(s.getvalue()) + ti.uname = 'calibrator' + ti.mtime = int(time.time()) + tf.addfile(tarinfo=ti, fileobj=s) + + for (name, im) in ims: + taradd(name, cv2.imencode(".png", im)[1].tostring()) + taradd('left.yaml', self.yaml("/left", self.l)) + taradd('right.yaml', self.yaml("/right", self.r)) + taradd('ost.txt', self.ost()) + + def do_tarfile_calibration(self, filename): + archive = tarfile.open(filename, 'r') + limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ] + rimages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ] + + if not len(limages) == len(rimages): + raise CalibrationException("Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages))) + + ##\todo Check that the filenames match and stuff + + self.cal(limages, rimages) diff --git a/future pkg/image_pipeline/camera_calibration/src/camera_calibration/camera_calibrator.py b/future pkg/image_pipeline/camera_calibration/src/camera_calibration/camera_calibrator.py new file mode 100755 index 0000000..bb9af7c --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/src/camera_calibration/camera_calibrator.py @@ -0,0 +1,352 @@ +#!/usr/bin/python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
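do_save() on the calibrators above writes /tmp/calibrationdata.tar.gz containing the sample images plus the text/YAML calibration files, and do_tarfile_calibration() consumes exactly that layout, so a GUI session can be re-run offline (this is also what scripts/tarfile_calibration.py automates). A hedged sketch for the monocular case, assuming the default tarball path exists:

from camera_calibration.calibrator import ChessboardInfo, MonoCalibrator

board = ChessboardInfo(8, 6, 0.108)
mc = MonoCalibrator([board])
mc.do_tarfile_calibration("/tmp/calibrationdata.tar.gz")   # loads left-*.png and calls cal()
print(mc.ost())   # the same oST text cal_from_tarfile() prints after recalibrating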
+ +import cv2 +import message_filters +import numpy +import os +import rospy +import sensor_msgs.msg +import sensor_msgs.srv +import threading +import time +from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, ChessboardInfo, Patterns +from collections import deque +from message_filters import ApproximateTimeSynchronizer +from std_msgs.msg import String +from std_srvs.srv import Empty + + +class DisplayThread(threading.Thread): + """ + Thread that displays the current images + It is its own thread so that all display can be done + in one thread to overcome imshow limitations and + https://github.com/ros-perception/image_pipeline/issues/85 + """ + def __init__(self, queue, opencv_calibration_node): + threading.Thread.__init__(self) + self.queue = queue + self.opencv_calibration_node = opencv_calibration_node + + def run(self): + cv2.namedWindow("display", cv2.WINDOW_NORMAL) + cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse) + cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale) + while True: + # wait for an image (could happen at the very beginning when the queue is still empty) + while len(self.queue) == 0: + time.sleep(0.1) + im = self.queue[0] + cv2.imshow("display", im) + k = cv2.waitKey(6) & 0xFF + if k in [27, ord('q')]: + rospy.signal_shutdown('Quit') + elif k == ord('s'): + self.opencv_calibration_node.screendump(im) + +class ConsumerThread(threading.Thread): + def __init__(self, queue, function): + threading.Thread.__init__(self) + self.queue = queue + self.function = function + + def run(self): + while True: + # wait for an image (could happen at the very beginning when the queue is still empty) + while len(self.queue) == 0: + time.sleep(0.1) + self.function(self.queue[0]) + + +class CalibrationNode: + def __init__(self, boards, service_check = True, synchronizer = message_filters.TimeSynchronizer, flags = 0, + pattern=Patterns.Chessboard, camera_name='', checkerboard_flags = 0): + if service_check: + # assume any non-default service names have been set. 
Wait for the service to become ready + for svcname in ["camera", "left_camera", "right_camera"]: + remapped = rospy.remap_name(svcname) + if remapped != svcname: + fullservicename = "%s/set_camera_info" % remapped + print("Waiting for service", fullservicename, "...") + try: + rospy.wait_for_service(fullservicename, 5) + print("OK") + except rospy.ROSException: + print("Service not found") + rospy.signal_shutdown('Quit') + + self._boards = boards + self._calib_flags = flags + self._checkerboard_flags = checkerboard_flags + self._pattern = pattern + self._camera_name = camera_name + lsub = message_filters.Subscriber('left', sensor_msgs.msg.Image) + rsub = message_filters.Subscriber('right', sensor_msgs.msg.Image) + ts = synchronizer([lsub, rsub], 4) + ts.registerCallback(self.queue_stereo) + + msub = message_filters.Subscriber('image', sensor_msgs.msg.Image) + msub.registerCallback(self.queue_monocular) + + self.set_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("camera"), + sensor_msgs.srv.SetCameraInfo) + self.set_left_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("left_camera"), + sensor_msgs.srv.SetCameraInfo) + self.set_right_camera_info_service = rospy.ServiceProxy("%s/set_camera_info" % rospy.remap_name("right_camera"), + sensor_msgs.srv.SetCameraInfo) + + self.q_mono = deque([], 1) + self.q_stereo = deque([], 1) + + self.c = None + + mth = ConsumerThread(self.q_mono, self.handle_monocular) + mth.setDaemon(True) + mth.start() + + sth = ConsumerThread(self.q_stereo, self.handle_stereo) + sth.setDaemon(True) + sth.start() + + def redraw_stereo(self, *args): + pass + def redraw_monocular(self, *args): + pass + + def queue_monocular(self, msg): + self.q_mono.append(msg) + + def queue_stereo(self, lmsg, rmsg): + self.q_stereo.append((lmsg, rmsg)) + + def handle_monocular(self, msg): + if self.c == None: + if self._camera_name: + self.c = MonoCalibrator(self._boards, self._calib_flags, self._pattern, name=self._camera_name, + checkerboard_flags=self._checkerboard_flags) + else: + self.c = MonoCalibrator(self._boards, self._calib_flags, self._pattern, + checkerboard_flags=self.checkerboard_flags) + + # This should just call the MonoCalibrator + drawable = self.c.handle_msg(msg) + self.displaywidth = drawable.scrib.shape[1] + self.redraw_monocular(drawable) + + def handle_stereo(self, msg): + if self.c == None: + if self._camera_name: + self.c = StereoCalibrator(self._boards, self._calib_flags, self._pattern, name=self._camera_name, + checkerboard_flags=self._checkerboard_flags) + else: + self.c = StereoCalibrator(self._boards, self._calib_flags, self._pattern, + checkerboard_flags=self._checkerboard_flags) + + drawable = self.c.handle_msg(msg) + self.displaywidth = drawable.lscrib.shape[1] + drawable.rscrib.shape[1] + self.redraw_stereo(drawable) + + + def check_set_camera_info(self, response): + if response.success: + return True + + for i in range(10): + print("!" * 80) + print() + print("Attempt to set camera info failed: " + response.status_message) + print() + for i in range(10): + print("!" * 80) + print() + rospy.logerr('Unable to set camera info for calibration. 
Failure message: %s' % response.status_message) + return False + + def do_upload(self): + self.c.report() + print(self.c.ost()) + info = self.c.as_message() + + rv = True + if self.c.is_mono: + response = self.set_camera_info_service(info) + rv = self.check_set_camera_info(response) + else: + response = self.set_left_camera_info_service(info[0]) + rv = rv and self.check_set_camera_info(response) + response = self.set_right_camera_info_service(info[1]) + rv = rv and self.check_set_camera_info(response) + return rv + + +class OpenCVCalibrationNode(CalibrationNode): + """ Calibration node with an OpenCV Gui """ + FONT_FACE = cv2.FONT_HERSHEY_SIMPLEX + FONT_SCALE = 0.6 + FONT_THICKNESS = 2 + + def __init__(self, *args, **kwargs): + + CalibrationNode.__init__(self, *args, **kwargs) + + self.queue_display = deque([], 1) + self.display_thread = DisplayThread(self.queue_display, self) + self.display_thread.setDaemon(True) + self.display_thread.start() + + @classmethod + def putText(cls, img, text, org, color = (0,0,0)): + cv2.putText(img, text, org, cls.FONT_FACE, cls.FONT_SCALE, color, thickness = cls.FONT_THICKNESS) + + @classmethod + def getTextSize(cls, text): + return cv2.getTextSize(text, cls.FONT_FACE, cls.FONT_SCALE, cls.FONT_THICKNESS)[0] + + def on_mouse(self, event, x, y, flags, param): + if event == cv2.EVENT_LBUTTONDOWN and self.displaywidth < x: + if self.c.goodenough: + if 180 <= y < 280: + self.c.do_calibration() + if self.c.calibrated: + if 280 <= y < 380: + self.c.do_save() + elif 380 <= y < 480: + # Only shut down if we set camera info correctly, #3993 + if self.do_upload(): + rospy.signal_shutdown('Quit') + + def on_scale(self, scalevalue): + if self.c.calibrated: + self.c.set_alpha(scalevalue / 100.0) + + def button(self, dst, label, enable): + dst.fill(255) + size = (dst.shape[1], dst.shape[0]) + if enable: + color = (155, 155, 80) + else: + color = (224, 224, 224) + cv2.circle(dst, (size[0] // 2, size[1] // 2), min(size) // 2, color, -1) + (w, h) = self.getTextSize(label) + self.putText(dst, label, ((size[0] - w) // 2, (size[1] + h) // 2), (255,255,255)) + + def buttons(self, display): + x = self.displaywidth + self.button(display[180:280,x:x+100], "CALIBRATE", self.c.goodenough) + self.button(display[280:380,x:x+100], "SAVE", self.c.calibrated) + self.button(display[380:480,x:x+100], "COMMIT", self.c.calibrated) + + def y(self, i): + """Set up right-size images""" + return 30 + 40 * i + + def screendump(self, im): + i = 0 + while os.access("/tmp/dump%d.png" % i, os.R_OK): + i += 1 + cv2.imwrite("/tmp/dump%d.png" % i, im) + + def redraw_monocular(self, drawable): + height = drawable.scrib.shape[0] + width = drawable.scrib.shape[1] + + display = numpy.zeros((max(480, height), width + 100, 3), dtype=numpy.uint8) + display[0:height, 0:width,:] = drawable.scrib + display[0:height, width:width+100,:].fill(255) + + + self.buttons(display) + if not self.c.calibrated: + if drawable.params: + for i, (label, lo, hi, progress) in enumerate(drawable.params): + (w,_) = self.getTextSize(label) + self.putText(display, label, (width + (100 - w) // 2, self.y(i))) + color = (0,255,0) + if progress < 1.0: + color = (0, int(progress*255.), 255) + cv2.line(display, + (int(width + lo * 100), self.y(i) + 20), + (int(width + hi * 100), self.y(i) + 20), + color, 4) + + else: + self.putText(display, "lin.", (width, self.y(0))) + linerror = drawable.linear_error + if linerror < 0: + msg = "?" 
+ else: + msg = "%.2f" % linerror + #print "linear", linerror + self.putText(display, msg, (width, self.y(1))) + + self.queue_display.append(display) + + def redraw_stereo(self, drawable): + height = drawable.lscrib.shape[0] + width = drawable.lscrib.shape[1] + + display = numpy.zeros((max(480, height), 2 * width + 100, 3), dtype=numpy.uint8) + display[0:height, 0:width,:] = drawable.lscrib + display[0:height, width:2*width,:] = drawable.rscrib + display[0:height, 2*width:2*width+100,:].fill(255) + + self.buttons(display) + + if not self.c.calibrated: + if drawable.params: + for i, (label, lo, hi, progress) in enumerate(drawable.params): + (w,_) = self.getTextSize(label) + self.putText(display, label, (2 * width + (100 - w) // 2, self.y(i))) + color = (0,255,0) + if progress < 1.0: + color = (0, int(progress*255.), 255) + cv2.line(display, + (int(2 * width + lo * 100), self.y(i) + 20), + (int(2 * width + hi * 100), self.y(i) + 20), + color, 4) + + else: + self.putText(display, "epi.", (2 * width, self.y(0))) + if drawable.epierror == -1: + msg = "?" + else: + msg = "%.2f" % drawable.epierror + self.putText(display, msg, (2 * width, self.y(1))) + # TODO dim is never set anywhere. Supposed to be observed chessboard size? + if drawable.dim != -1: + self.putText(display, "dim", (2 * width, self.y(2))) + self.putText(display, "%.3f" % drawable.dim, (2 * width, self.y(3))) + + self.queue_display.append(display) diff --git a/future pkg/image_pipeline/camera_calibration/src/camera_calibration/camera_checker.py b/future pkg/image_pipeline/camera_calibration/src/camera_calibration/camera_checker.py new file mode 100755 index 0000000..d27b893 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/src/camera_calibration/camera_checker.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
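
The redraw callbacks above compose the GUI frame by hand: the camera image is copied into a canvas 100 pixels wider, the extra strip is filled white, and one coloured progress bar is drawn per calibration parameter. The following is a minimal standalone sketch of that composition using only numpy and OpenCV; the parameter tuples follow the same (label, lo, hi, progress) layout as drawable.params, but the values are hypothetical.

```
import cv2
import numpy as np

def compose_display(image, params, panel_width=100):
    """Copy `image` into a wider canvas and draw one progress bar per parameter.

    `params` is a list of (label, lo, hi, progress) tuples with lo/hi/progress in [0, 1],
    mirroring the drawable.params layout above (hypothetical data here).
    """
    height, width = image.shape[:2]
    canvas = np.zeros((max(480, height), width + panel_width, 3), dtype=np.uint8)
    canvas[:height, :width] = image
    canvas[:, width:].fill(255)  # white side panel for buttons and bars

    for i, (label, lo, hi, progress) in enumerate(params):
        y = 30 + 40 * i
        cv2.putText(canvas, label, (width + 5, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
        # green once the parameter range is fully covered, orange otherwise
        color = (0, 255, 0) if progress >= 1.0 else (0, int(progress * 255), 255)
        cv2.line(canvas,
                 (int(width + lo * panel_width), y + 20),
                 (int(width + hi * panel_width), y + 20),
                 color, 4)
    return canvas

if __name__ == '__main__':
    img = np.full((480, 640, 3), 64, dtype=np.uint8)           # placeholder camera frame
    bars = [("X", 0.1, 0.8, 0.7), ("Y", 0.0, 0.5, 0.5),
            ("Size", 0.2, 0.9, 1.0), ("Skew", 0.0, 0.3, 0.3)]  # hypothetical coverage values
    cv2.imwrite("display_preview.png", compose_display(img, bars))
```
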
+ +import cv2 +import cv_bridge +import functools +import message_filters +import numpy +import rospy +import sensor_msgs.msg +import sensor_msgs.srv +import threading + +from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, ChessboardInfo +from message_filters import ApproximateTimeSynchronizer + +try: + from queue import Queue +except ImportError: + from Queue import Queue + + +def mean(seq): + return sum(seq) / len(seq) + +def lmin(seq1, seq2): + """ Pairwise minimum of two sequences """ + return [min(a, b) for (a, b) in zip(seq1, seq2)] + +def lmax(seq1, seq2): + """ Pairwise maximum of two sequences """ + return [max(a, b) for (a, b) in zip(seq1, seq2)] + +class ConsumerThread(threading.Thread): + def __init__(self, queue, function): + threading.Thread.__init__(self) + self.queue = queue + self.function = function + + def run(self): + while not rospy.is_shutdown(): + while not rospy.is_shutdown(): + m = self.queue.get() + if self.queue.empty(): + break + self.function(m) + +class CameraCheckerNode: + + def __init__(self, chess_size, dim, approximate=0): + self.board = ChessboardInfo() + self.board.n_cols = chess_size[0] + self.board.n_rows = chess_size[1] + self.board.dim = dim + + # make sure n_cols is not smaller than n_rows, otherwise error computation will be off + if self.board.n_cols < self.board.n_rows: + self.board.n_cols, self.board.n_rows = self.board.n_rows, self.board.n_cols + + image_topic = rospy.resolve_name("monocular") + "/image_rect" + camera_topic = rospy.resolve_name("monocular") + "/camera_info" + + tosync_mono = [ + (image_topic, sensor_msgs.msg.Image), + (camera_topic, sensor_msgs.msg.CameraInfo), + ] + + if approximate <= 0: + sync = message_filters.TimeSynchronizer + else: + sync = functools.partial(ApproximateTimeSynchronizer, slop=approximate) + + tsm = sync([message_filters.Subscriber(topic, type) for (topic, type) in tosync_mono], 10) + tsm.registerCallback(self.queue_monocular) + + left_topic = rospy.resolve_name("stereo") + "/left/image_rect" + left_camera_topic = rospy.resolve_name("stereo") + "/left/camera_info" + right_topic = rospy.resolve_name("stereo") + "/right/image_rect" + right_camera_topic = rospy.resolve_name("stereo") + "/right/camera_info" + + tosync_stereo = [ + (left_topic, sensor_msgs.msg.Image), + (left_camera_topic, sensor_msgs.msg.CameraInfo), + (right_topic, sensor_msgs.msg.Image), + (right_camera_topic, sensor_msgs.msg.CameraInfo) + ] + + tss = sync([message_filters.Subscriber(topic, type) for (topic, type) in tosync_stereo], 10) + tss.registerCallback(self.queue_stereo) + + self.br = cv_bridge.CvBridge() + + self.q_mono = Queue() + self.q_stereo = Queue() + + mth = ConsumerThread(self.q_mono, self.handle_monocular) + mth.setDaemon(True) + mth.start() + + sth = ConsumerThread(self.q_stereo, self.handle_stereo) + sth.setDaemon(True) + sth.start() + + self.mc = MonoCalibrator([self.board]) + self.sc = StereoCalibrator([self.board]) + + def queue_monocular(self, msg, cmsg): + self.q_mono.put((msg, cmsg)) + + def queue_stereo(self, lmsg, lcmsg, rmsg, rcmsg): + self.q_stereo.put((lmsg, lcmsg, rmsg, rcmsg)) + + def mkgray(self, msg): + return self.mc.mkgray(msg) + + def image_corners(self, im): + (ok, corners, b) = self.mc.get_corners(im) + if ok: + return corners + else: + return None + + def handle_monocular(self, msg): + + (image, camera) = msg + gray = self.mkgray(image) + C = self.image_corners(gray) + if C is not None: + linearity_rms = self.mc.linear_error(C, self.board) + + # Add in reprojection check + 
image_points = C + object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0] + dist_coeffs = numpy.zeros((4, 1)) + camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2] ], + [ camera.P[4], camera.P[5], camera.P[6] ], + [ camera.P[8], camera.P[9], camera.P[10] ] ] ) + ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs) + # Convert rotation into a 3x3 Rotation Matrix + rot3x3, _ = cv2.Rodrigues(rot) + # Reproject model points into image + object_points_world = numpy.asmatrix(rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans) + reprojected_h = camera_matrix * object_points_world + reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :]) + reprojection_errors = image_points.squeeze().T - reprojected + + reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.product(reprojection_errors.shape)) + + # Print the results + print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms)) + else: + print('no chessboard') + + def handle_stereo(self, msg): + + (lmsg, lcmsg, rmsg, rcmsg) = msg + lgray = self.mkgray(lmsg) + rgray = self.mkgray(rmsg) + + L = self.image_corners(lgray) + R = self.image_corners(rgray) + if L is not None and R is not None: + epipolar = self.sc.epipolar_error(L, R) + + dimension = self.sc.chessboard_size(L, R, self.board, msg=(lcmsg, rcmsg)) + + print("epipolar error: %f pixels dimension: %f m" % (epipolar, dimension)) + else: + print("no chessboard") diff --git a/future pkg/image_pipeline/camera_calibration/test/directed.py b/future pkg/image_pipeline/camera_calibration/test/directed.py new file mode 100644 index 0000000..43a7aaa --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/test/directed.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
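
The reprojection check above builds the 3x3 camera matrix from the P projection matrix, recovers the board pose with cv2.solvePnP, reprojects the model points by hand through the rotation matrix, and takes the RMS of the pixel residuals. The same number can be obtained more compactly by letting cv2.projectPoints do the reprojection; below is a minimal sketch with synthetic inputs (the board geometry, intrinsics, pose and noise level are placeholders, not values taken from the node).

```
import cv2
import numpy as np

def reprojection_rms(object_points, image_points, camera_matrix, dist_coeffs):
    """RMS pixel error between detected corners and the reprojected board model."""
    ok, rvec, tvec = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
    if not ok:
        raise RuntimeError("solvePnP failed")
    projected, _ = cv2.projectPoints(object_points, rvec, tvec, camera_matrix, dist_coeffs)
    residuals = projected.reshape(-1, 2) - image_points.reshape(-1, 2)
    return float(np.sqrt(np.mean(residuals ** 2)))

if __name__ == '__main__':
    # Hypothetical 8x6 board with 0.108 m squares, seen by an ideal 525 px camera.
    cols, rows, dim = 8, 6, 0.108
    objp = np.array([[c * dim, r * dim, 0.0] for r in range(rows) for c in range(cols)],
                    dtype=np.float32)
    K = np.array([[525.0, 0.0, 320.0], [0.0, 525.0, 240.0], [0.0, 0.0, 1.0]])
    D = np.zeros((4, 1))
    imgp, _ = cv2.projectPoints(objp, np.array([0.1, -0.2, 0.05]), np.array([-0.3, -0.2, 1.5]), K, D)
    imgp += np.random.normal(scale=0.3, size=imgp.shape)  # simulate detector noise
    print("reprojection RMS: %.3f px" % reprojection_rms(objp, imgp.astype(np.float32), K, D))
```
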
+ +import roslib +import rosunit +import rospy +import cv2 + +import collections +import copy +import numpy +import os +import sys +import tarfile +import unittest + +from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, \ + Patterns, CalibrationException, ChessboardInfo, image_from_archive + +board = ChessboardInfo() +board.n_cols = 8 +board.n_rows = 6 +board.dim = 0.108 + +class TestDirected(unittest.TestCase): + def setUp(self): + tar_path = roslib.packages.find_resource('camera_calibration', 'camera_calibration.tar.gz')[0] + self.tar = tarfile.open(tar_path, 'r') + self.limages = [image_from_archive(self.tar, "wide/left%04d.pgm" % i) for i in range(3, 15)] + self.rimages = [image_from_archive(self.tar, "wide/right%04d.pgm" % i) for i in range(3, 15)] + self.l = {} + self.r = {} + self.sizes = [(320,240), (640,480), (800,600), (1024,768)] + for dim in self.sizes: + self.l[dim] = [] + self.r[dim] = [] + for li,ri in zip(self.limages, self.rimages): + rli = cv2.resize(li, (dim[0], dim[1])) + rri = cv2.resize(ri, (dim[0], dim[1])) + self.l[dim].append(rli) + self.r[dim].append(rri) + + def assert_good_mono(self, c, dim, max_err): + #c.report() + self.assert_(len(c.ost()) > 0) + lin_err = 0 + n = 0 + for img in self.l[dim]: + lin_err_local = c.linear_error_from_image(img) + if lin_err_local: + lin_err += lin_err_local + n += 1 + if n > 0: + lin_err /= n + self.assert_(0.0 < lin_err, 'lin_err is %f' % lin_err) + self.assert_(lin_err < max_err, 'lin_err is %f' % lin_err) + + flat = c.remap(img) + self.assertEqual(img.shape, flat.shape) + + def test_monocular(self): + # Run the calibrator, produce a calibration, check it + mc = MonoCalibrator([ board ], cv2.CALIB_FIX_K3) + max_errs = [0.1, 0.2, 0.4, 0.7] + for i, dim in enumerate(self.sizes): + mc.cal(self.l[dim]) + self.assert_good_mono(mc, dim, max_errs[i]) + + # Make another calibration, import previous calibration as a message, + # and assert that the new one is good. + + mc2 = MonoCalibrator([board]) + mc2.from_message(mc.as_message()) + self.assert_good_mono(mc2, dim, max_errs[i]) + + def test_stereo(self): + epierrors = [0.1, 0.2, 0.45, 1.0] + for i, dim in enumerate(self.sizes): + print("Dim =", dim) + sc = StereoCalibrator([board], cv2.CALIB_FIX_K3) + sc.cal(self.l[dim], self.r[dim]) + + sc.report() + #print sc.ost() + + # NOTE: epipolar error currently increases with resolution. + # At highest res expect error ~0.75 + epierror = 0 + n = 0 + for l_img, r_img in zip(self.l[dim], self.r[dim]): + epierror_local = sc.epipolar_error_from_images(l_img, r_img) + if epierror_local: + epierror += epierror_local + n += 1 + epierror /= n + self.assert_(epierror < epierrors[i], + 'Epipolar error is %f for resolution i = %d' % (epierror, i)) + + self.assertAlmostEqual(sc.chessboard_size_from_images(self.l[dim][0], self.r[dim][0]), .108, 2) + + #print sc.as_message() + + img = self.l[dim][0] + flat = sc.l.remap(img) + self.assertEqual(img.shape, flat.shape) + flat = sc.r.remap(img) + self.assertEqual(img.shape, flat.shape) + + sc2 = StereoCalibrator([board]) + sc2.from_message(sc.as_message()) + # sc2.set_alpha(1.0) + #sc2.report() + self.assert_(len(sc2.ost()) > 0) + + def test_nochecker(self): + # Run with same images, but looking for an incorrect chessboard size (8, 7). + # Should raise an exception because of lack of input points. 
+ new_board = copy.deepcopy(board) + new_board.n_cols = 8 + new_board.n_rows = 7 + + sc = StereoCalibrator([new_board]) + self.assertRaises(CalibrationException, lambda: sc.cal(self.limages, self.rimages)) + mc = MonoCalibrator([new_board]) + self.assertRaises(CalibrationException, lambda: mc.cal(self.limages)) + + + +class TestArtificial(unittest.TestCase): + Setup = collections.namedtuple('Setup', ['pattern', 'cols', 'rows', 'lin_err', 'K_err']) + + def setUp(self): + # Define some image transforms that will simulate a camera position + M = [] + cv2.getPerspectiveTransform + self.K = numpy.array([[500, 0, 250], [0, 500, 250], [0, 0, 1]], numpy.float32) + self.D = numpy.array([]) + # physical size of the board + self.board_width_dim = 1 + + # Generate data for different grid types. For each grid type, define the different sizes of + # grid that are recognized (n row, n col) + # Patterns.Circles, Patterns.ACircles + self.setups = [ self.Setup(pattern=Patterns.Chessboard, cols=7, rows=8, lin_err=0.2, K_err=8.2), + self.Setup(pattern=Patterns.Circles, cols=7, rows=8, lin_err=0.1, K_err=4), + self.Setup(pattern=Patterns.ACircles, cols=3, rows=5, lin_err=0.1, K_err=8) ] + self.limages = [] + self.rimages = [] + for setup in self.setups: + self.limages.append([]) + self.rimages.append([]) + + # Create the pattern + if setup.pattern == Patterns.Chessboard: + pattern = numpy.zeros((50*(setup.rows+3), 50*(setup.cols+3), 1), numpy.uint8) + pattern.fill(255) + for j in range(1, setup.rows+2): + for i in range(1+(j%2), setup.cols+2, 2): + pattern[50*j:50*(j+1), 50*i:50*(i+1)].fill(0) + elif setup.pattern == Patterns.Circles: + pattern = numpy.zeros((50*(setup.rows+2), 50*(setup.cols+2), 1), numpy.uint8) + pattern.fill(255) + for j in range(1, setup.rows+1): + for i in range(1, setup.cols+1): + cv2.circle(pattern, (50*i + 25, 50*j + 25), 15, (0,0,0), -1 ) + elif setup.pattern == Patterns.ACircles: + x = 60 + pattern = numpy.zeros((x*(setup.rows+2), x*(setup.cols+5), 1), numpy.uint8) + pattern.fill(255) + for j in range(1, setup.rows+1): + for i in range(0, setup.cols): + cv2.circle(pattern, (x*(1 + 2*i + (j%2)) + x/2, x*j + x/2), x/3, (0,0,0), -1) + + rows, cols, _ = pattern.shape + object_points_2d = numpy.array([[0, 0], [0, cols-1], [rows-1, cols-1], [rows-1, 0]], numpy.float32) + object_points_3d = numpy.array([[0, 0, 0], [0, cols-1, 0], [rows-1, cols-1, 0], [rows-1, 0, 0]], numpy.float32) + object_points_3d *= self.board_width_dim/float(cols) + + # create the artificial view points + rvec = [ [0, 0, 0], [0, 0, 0.4], [0, 0.4, 0], [0.4, 0, 0], [0.4, 0.4, 0], [0.4, 0, 0.4], [0, 0.4, 0.4], [0.4, 0.4, 0.4] ] + tvec = [ [-0.5, -0.5, 3], [-0.5, -0.5, 3], [-0.5, -0.1, 3], [-0.1, -0.5, 3], [-0.1, -0.1, 3], [-0.1, -0.5, 3], [-0.5, -0.1, 3], [-0.1, 0.1, 3] ] + + dsize = (480, 640) + for i in range(len(rvec)): + R = numpy.array(rvec[i], numpy.float32) + T = numpy.array(tvec[i], numpy.float32) + + image_points, _ = cv2.projectPoints(object_points_3d, R, T, self.K, self.D) + + # deduce the perspective transform + M.append(cv2.getPerspectiveTransform(object_points_2d, image_points)) + + # project the pattern according to the different cameras + pattern_warped = cv2.warpPerspective(pattern, M[i], dsize) + self.limages[-1].append(pattern_warped) + + def assert_good_mono(self, c, images, max_err): + #c.report() + self.assert_(len(c.ost()) > 0) + lin_err = 0 + n = 0 + for img in images: + lin_err_local = c.linear_error_from_image(img) + if lin_err_local: + lin_err += lin_err_local + n += 1 + if n > 0: + lin_err /= n 
+ print("linear error is %f" % lin_err) + self.assert_(0.0 < lin_err, 'lin_err is %f' % lin_err) + self.assert_(lin_err < max_err, 'lin_err is %f' % lin_err) + + flat = c.remap(img) + self.assertEqual(img.shape, flat.shape) + + def test_monocular(self): + # Run the calibrator, produce a calibration, check it + for i, setup in enumerate(self.setups): + board = ChessboardInfo() + board.n_cols = setup.cols + board.n_rows = setup.rows + board.dim = self.board_width_dim + + mc = MonoCalibrator([ board ], flags=cv2.CALIB_FIX_K3, pattern=setup.pattern) + + if 0: + # display the patterns viewed by the camera + for pattern_warped in self.limages[i]: + cv2.imshow("toto", pattern_warped) + cv2.waitKey(0) + + mc.cal(self.limages[i]) + self.assert_good_mono(mc, self.limages[i], setup.lin_err) + + # Make sure the intrinsics are similar + err_intrinsics = numpy.linalg.norm(mc.intrinsics - self.K, ord=numpy.inf) + self.assert_(err_intrinsics < setup.K_err, + 'intrinsics error is %f for resolution i = %d' % (err_intrinsics, i)) + print('intrinsics error is %f' % numpy.linalg.norm(mc.intrinsics - self.K, ord=numpy.inf)) + +if __name__ == '__main__': + #rosunit.unitrun('camera_calibration', 'directed', TestDirected) + rosunit.unitrun('camera_calibration', 'artificial', TestArtificial) diff --git a/future pkg/image_pipeline/camera_calibration/test/multiple_boards.py b/future pkg/image_pipeline/camera_calibration/test/multiple_boards.py new file mode 100644 index 0000000..a726a70 --- /dev/null +++ b/future pkg/image_pipeline/camera_calibration/test/multiple_boards.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +import roslib +import rostest +import rospy +import unittest +import tarfile +import copy +import os, sys + +from camera_calibration.calibrator import StereoCalibrator, ChessboardInfo, image_from_archive + +# Large board used for PR2 calibration +board = ChessboardInfo() +board.n_cols = 7 +board.n_rows = 6 +board.dim = 0.108 + +class TestMultipleBoards(unittest.TestCase): + def test_multiple_boards(self): + small_board = ChessboardInfo() + small_board.n_cols = 5 + small_board.n_rows = 4 + small_board.dim = 0.025 + + stereo_cal = StereoCalibrator([board, small_board]) + + my_archive_name = roslib.packages.find_resource('camera_calibration', 'multi_board_calibration.tar.gz')[0] + stereo_cal.do_tarfile_calibration(my_archive_name) + + stereo_cal.report() + stereo_cal.ost() + + # Check error for big image + archive = tarfile.open(my_archive_name) + l1_big = image_from_archive(archive, "left-0000.png") + r1_big = image_from_archive(archive, "right-0000.png") + epi_big = stereo_cal.epipolar_error_from_images(l1_big, r1_big) + self.assert_(epi_big < 1.0, "Epipolar error for large checkerboard > 1.0. Error: %.2f" % epi_big) + + # Small checkerboard has larger error threshold for now + l1_sm = image_from_archive(archive, "left-0012-sm.png") + r1_sm = image_from_archive(archive, "right-0012-sm.png") + epi_sm = stereo_cal.epipolar_error_from_images(l1_sm, r1_sm) + self.assert_(epi_sm < 2.0, "Epipolar error for small checkerboard > 2.0. Error: %.2f" % epi_sm) + + + +if __name__ == '__main__': + if 1: + rostest.unitrun('camera_calibration', 'test_multi_board_cal', TestMultipleBoards) + else: + suite = unittest.TestSuite() + suite.addTest(TestMultipleBoards('test_multi_board_cal')) + unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/future pkg/image_pipeline/depth_image_proc/.tar b/future pkg/image_pipeline/depth_image_proc/.tar new file mode 100644 index 0000000..7cdfe41 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/.tar @@ -0,0 +1,2 @@ +{!!python/unicode 'url': 'https://github.com/ros-gbp/image_pipeline-release/archive/release/kinetic/depth_image_proc/1.12.23-0.tar.gz', + !!python/unicode 'version': image_pipeline-release-release-kinetic-depth_image_proc-1.12.23-0} diff --git a/future pkg/image_pipeline/depth_image_proc/CHANGELOG.rst b/future pkg/image_pipeline/depth_image_proc/CHANGELOG.rst new file mode 100644 index 0000000..3ea67b2 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/CHANGELOG.rst @@ -0,0 +1,122 @@ +1.12.23 (2018-05-10) +-------------------- + +1.12.22 (2017-12-08) +-------------------- + +1.12.21 (2017-11-05) +-------------------- +* Fix C++11 compilation + This fixes `#292 `_ and `#291 `_ +* Contributors: Vincent Rabaud + +1.12.20 (2017-04-30) +-------------------- +* Fix CMake warnings about Eigen. +* Convert depth image metric from [m] to [mm] +* address gcc6 build error + With gcc6, compiling fails with `stdlib.h: No such file or directory`, + as including '-isystem /usr/include' breaks with gcc6, cf., + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129. + This commit addresses this issue for this package in the same way + it was addressed in various other ROS packages. 
A list of related + commits and pull requests is at: + https://github.com/ros/rosdistro/issues/12783 + Signed-off-by: Lukas Bulwahn +* Contributors: Kentaro Wada, Lukas Bulwahn, Vincent Rabaud + +1.12.19 (2016-07-24) +-------------------- + +1.12.18 (2016-07-12) +-------------------- + +1.12.17 (2016-07-11) +-------------------- + +1.12.16 (2016-03-19) +-------------------- +* check number of channels before the process +* search minimum value with OpenCV +* Use OpenCV to be faster +* Add a feature for a depth image to crop foremost image +* Contributors: Kenta Yonekura + +1.12.15 (2016-01-17) +-------------------- +* Add option for exact time sync for point_cloud_xyzrgb +* simplify OpenCV3 conversion +* Contributors: Kentaro Wada, Vincent Rabaud + +1.12.14 (2015-07-22) +-------------------- + +1.12.13 (2015-04-06) +-------------------- +* Add radial point cloud processors +* Contributors: Hunter Laux + +1.12.12 (2014-12-31) +-------------------- +* adds range_max +* exports depth_conversions + with convert for xyz PC only +* exports DepthTraits +* Contributors: enriquefernandez + +1.12.11 (2014-10-26) +-------------------- + +1.12.10 (2014-09-28) +-------------------- + +1.12.9 (2014-09-21) +------------------- +* get code to compile with OpenCV3 + fixes `#96 `_ +* Contributors: Vincent Rabaud + +1.12.8 (2014-08-19) +------------------- + +1.12.6 (2014-07-27) +------------------- +* Add point_cloud_xyzi nodelet + This is for cameras that output depth and intensity images. + It's based on the point_cloud_xyzrgb nodelet. +* Missing runtime dependency - eigen_conversions + `libdepth_image_proc` is missing this dependency at runtime + ``` + > ldd libdepth_image_proc.so | grep eigen + libeigen_conversions.so => not found + ``` + Which causes the following error on loading depth_image_proc: + ``` + [ INFO] [1402564815.530736554]: /camera/rgb/camera_info -> /camera/rgb/camera_info + [ERROR] [1402564815.727176562]: Failed to load nodelet [/camera/depth_metric_rect] of type + [depth_image_proc/convert_metric]: Failed to load library /opt/ros/indigo/lib//libdepth_image_proc.so. + Make sure that you are calling the PLUGINLIB_EXPORT_CLASS macro in the library code, and that + names are consistent between this macro and your XML. Error string: Could not load library (Poco + exception = libeigen_conversions.so: cannot open shared object file: No such file or directory) + [FATAL] [1402564815.727410623]: Service call failed! 
+ ``` +* Contributors: Daniel Stonier, Hunter Laux + +1.12.4 (2014-04-28) +------------------- +* depth_image_proc: fix missing symbols in nodelets +* Contributors: Michael Ferguson + +1.12.3 (2014-04-12) +------------------- + +1.12.2 (2014-04-08) +------------------- + +1.12.1 (2014-04-06) +------------------- +* replace tf usage by tf2 usage + +1.12.0 (2014-04-04) +------------------- +* remove PCL dependency diff --git a/future pkg/image_pipeline/depth_image_proc/CMakeLists.txt b/future pkg/image_pipeline/depth_image_proc/CMakeLists.txt new file mode 100644 index 0000000..7bb99d5 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/CMakeLists.txt @@ -0,0 +1,44 @@ +cmake_minimum_required(VERSION 2.8) +project(depth_image_proc) + +find_package(catkin REQUIRED cmake_modules cv_bridge eigen_conversions image_geometry image_transport message_filters nodelet sensor_msgs stereo_msgs tf2 tf2_ros) + +if(cv_bridge_VERSION VERSION_GREATER "1.12.0") + add_compile_options(-std=c++11) +endif() + +catkin_package( + INCLUDE_DIRS include + LIBRARIES ${PROJECT_NAME}) + +find_package(Boost REQUIRED) +find_package(Eigen3 QUIET) +if(NOT EIGEN3_FOUND) + find_package(Eigen REQUIRED) + set(EIGEN3_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS}) +endif() +find_package(OpenCV REQUIRED) +include_directories(include ${BOOST_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIRS}) + +add_library(${PROJECT_NAME} src/nodelets/convert_metric.cpp + src/nodelets/crop_foremost.cpp + src/nodelets/disparity.cpp + src/nodelets/point_cloud_xyz.cpp + src/nodelets/point_cloud_xyzrgb.cpp + src/nodelets/point_cloud_xyzi.cpp + src/nodelets/point_cloud_xyz_radial.cpp + src/nodelets/point_cloud_xyzi_radial.cpp + src/nodelets/register.cpp +) +target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBRARIES}) + +install(DIRECTORY include/${PROJECT_NAME}/ + DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} + FILES_MATCHING PATTERN "*.h") + +install(TARGETS ${PROJECT_NAME} + DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +) +install(FILES nodelet_plugins.xml + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +) diff --git a/future pkg/image_pipeline/depth_image_proc/include/depth_image_proc/depth_conversions.h b/future pkg/image_pipeline/depth_image_proc/include/depth_image_proc/depth_conversions.h new file mode 100644 index 0000000..9d4ad0c --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/include/depth_image_proc/depth_conversions.h @@ -0,0 +1,101 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. 
+* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#ifndef DEPTH_IMAGE_PROC_DEPTH_CONVERSIONS +#define DEPTH_IMAGE_PROC_DEPTH_CONVERSIONS + +#include +#include +#include +#include + +#include + +namespace depth_image_proc { + +typedef sensor_msgs::PointCloud2 PointCloud; + +// Handles float or uint16 depths +template +void convert( + const sensor_msgs::ImageConstPtr& depth_msg, + PointCloud::Ptr& cloud_msg, + const image_geometry::PinholeCameraModel& model, + double range_max = 0.0) +{ + // Use correct principal point from calibration + float center_x = model.cx(); + float center_y = model.cy(); + + // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y) + double unit_scaling = DepthTraits::toMeters( T(1) ); + float constant_x = unit_scaling / model.fx(); + float constant_y = unit_scaling / model.fy(); + float bad_point = std::numeric_limits::quiet_NaN(); + + sensor_msgs::PointCloud2Iterator iter_x(*cloud_msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(*cloud_msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(*cloud_msg, "z"); + const T* depth_row = reinterpret_cast(&depth_msg->data[0]); + int row_step = depth_msg->step / sizeof(T); + for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step) + { + for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z) + { + T depth = depth_row[u]; + + // Missing points denoted by NaNs + if (!DepthTraits::valid(depth)) + { + if (range_max != 0.0) + { + depth = DepthTraits::fromMeters(range_max); + } + else + { + *iter_x = *iter_y = *iter_z = bad_point; + continue; + } + } + + // Fill in XYZ + *iter_x = (u - center_x) * depth * constant_x; + *iter_y = (v - center_y) * depth * constant_y; + *iter_z = DepthTraits::toMeters(depth); + } + } +} + +} // namespace depth_image_proc + +#endif diff --git a/future pkg/image_pipeline/depth_image_proc/include/depth_image_proc/depth_traits.h b/future pkg/image_pipeline/depth_image_proc/include/depth_image_proc/depth_traits.h new file mode 100644 index 0000000..9c76de9 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/include/depth_image_proc/depth_traits.h @@ -0,0 +1,71 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#ifndef DEPTH_IMAGE_PROC_DEPTH_TRAITS +#define DEPTH_IMAGE_PROC_DEPTH_TRAITS + +#include +#include + +namespace depth_image_proc { + +// Encapsulate differences between processing float and uint16_t depths +template struct DepthTraits {}; + +template<> +struct DepthTraits +{ + static inline bool valid(uint16_t depth) { return depth != 0; } + static inline float toMeters(uint16_t depth) { return depth * 0.001f; } // originally mm + static inline uint16_t fromMeters(float depth) { return (depth * 1000.0f) + 0.5f; } + static inline void initializeBuffer(std::vector& buffer) {} // Do nothing - already zero-filled +}; + +template<> +struct DepthTraits +{ + static inline bool valid(float depth) { return std::isfinite(depth); } + static inline float toMeters(float depth) { return depth; } + static inline float fromMeters(float depth) { return depth; } + + static inline void initializeBuffer(std::vector& buffer) + { + float* start = reinterpret_cast(&buffer[0]); + float* end = reinterpret_cast(&buffer[0] + buffer.size()); + std::fill(start, end, std::numeric_limits::quiet_NaN()); + } +}; + +} // namespace depth_image_proc + +#endif diff --git a/future pkg/image_pipeline/depth_image_proc/mainpage.dox b/future pkg/image_pipeline/depth_image_proc/mainpage.dox new file mode 100644 index 0000000..6b0414b --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/mainpage.dox @@ -0,0 +1,26 @@ +/** +\mainpage +\htmlinclude manifest.html + +\b depth_image_proc is ... + + + + +\section codeapi Code API + + + + +*/ diff --git a/future pkg/image_pipeline/depth_image_proc/nodelet_plugins.xml b/future pkg/image_pipeline/depth_image_proc/nodelet_plugins.xml new file mode 100644 index 0000000..de29b96 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/nodelet_plugins.xml @@ -0,0 +1,73 @@ + + + + + Nodelet to convert raw uint16 depth image in mm to float depth image in m. + + + + + + Nodelet to convert depth image to disparity image. + + + + + + Nodelet to convert depth image to XYZ point cloud. + + + + + + Nodelet to combine registered depth image and RGB image into XYZRGB point cloud. + + + + + + Nodelet to combine registered depth image and intensity image into XYZI point cloud. + + + + + Nodelet to convert an Radial depth map to a point. 
+ + + + + + Nodelet to convert an Radial depth and intensity map to a point. + + + + + Nodelet to create a depth image registered to another camera frame. + + + + + + Nodelet to crop a depth image from foremost depth to a specific range. + + + + diff --git a/future pkg/image_pipeline/depth_image_proc/package.xml b/future pkg/image_pipeline/depth_image_proc/package.xml new file mode 100644 index 0000000..8a1abb7 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/package.xml @@ -0,0 +1,46 @@ + + depth_image_proc + 1.12.23 + + + Contains nodelets for processing depth images such as those + produced by OpenNI camera. Functions include creating disparity + images and point clouds, as well as registering (reprojecting) + a depth image into another camera frame. + + + Patrick Mihelich + Vincent Rabaud + BSD + http://ros.org/wiki/depth_image_proc + + + + + + catkin + + rostest + + boost + cmake_modules + cv_bridge + eigen_conversions + image_geometry + image_transport + message_filters + nodelet + sensor_msgs + stereo_msgs + tf2 + tf2_ros + + boost + cv_bridge + eigen_conversions + image_geometry + image_transport + nodelet + tf2 + tf2_ros + diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/convert_metric.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/convert_metric.cpp new file mode 100644 index 0000000..d5d6676 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/convert_metric.cpp @@ -0,0 +1,140 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include +#include +#include +#include +#include + +namespace depth_image_proc { + +namespace enc = sensor_msgs::image_encodings; + +class ConvertMetricNodelet : public nodelet::Nodelet +{ + // Subscriptions + boost::shared_ptr it_; + image_transport::Subscriber sub_raw_; + + // Publications + boost::mutex connect_mutex_; + image_transport::Publisher pub_depth_; + + virtual void onInit(); + + void connectCb(); + + void depthCb(const sensor_msgs::ImageConstPtr& raw_msg); +}; + +void ConvertMetricNodelet::onInit() +{ + ros::NodeHandle& nh = getNodeHandle(); + it_.reset(new image_transport::ImageTransport(nh)); + + // Monitor whether anyone is subscribed to the output + image_transport::SubscriberStatusCallback connect_cb = boost::bind(&ConvertMetricNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_depth_ + boost::lock_guard lock(connect_mutex_); + pub_depth_ = it_->advertise("image", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void ConvertMetricNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_depth_.getNumSubscribers() == 0) + { + sub_raw_.shutdown(); + } + else if (!sub_raw_) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_raw_ = it_->subscribe("image_raw", 1, &ConvertMetricNodelet::depthCb, this, hints); + } +} + +void ConvertMetricNodelet::depthCb(const sensor_msgs::ImageConstPtr& raw_msg) +{ + // Allocate new Image message + sensor_msgs::ImagePtr depth_msg( new sensor_msgs::Image ); + depth_msg->header = raw_msg->header; + depth_msg->height = raw_msg->height; + depth_msg->width = raw_msg->width; + + // Set data, encoding and step after converting the metric. + if (raw_msg->encoding == enc::TYPE_16UC1) + { + depth_msg->encoding = enc::TYPE_32FC1; + depth_msg->step = raw_msg->width * (enc::bitDepth(depth_msg->encoding) / 8); + depth_msg->data.resize(depth_msg->height * depth_msg->step); + // Fill in the depth image data, converting mm to m + float bad_point = std::numeric_limits::quiet_NaN (); + const uint16_t* raw_data = reinterpret_cast(&raw_msg->data[0]); + float* depth_data = reinterpret_cast(&depth_msg->data[0]); + for (unsigned index = 0; index < depth_msg->height * depth_msg->width; ++index) + { + uint16_t raw = raw_data[index]; + depth_data[index] = (raw == 0) ? bad_point : (float)raw * 0.001f; + } + } + else if (raw_msg->encoding == enc::TYPE_32FC1) + { + depth_msg->encoding = enc::TYPE_16UC1; + depth_msg->step = raw_msg->width * (enc::bitDepth(depth_msg->encoding) / 8); + depth_msg->data.resize(depth_msg->height * depth_msg->step); + // Fill in the depth image data, converting m to mm + uint16_t bad_point = 0; + const float* raw_data = reinterpret_cast(&raw_msg->data[0]); + uint16_t* depth_data = reinterpret_cast(&depth_msg->data[0]); + for (unsigned index = 0; index < depth_msg->height * depth_msg->width; ++index) + { + float raw = raw_data[index]; + depth_data[index] = std::isnan(raw) ? 
bad_point : (uint16_t)(raw * 1000); + } + } + else + { + ROS_ERROR("Unsupported image conversion from %s.", raw_msg->encoding.c_str()); + return; + } + + pub_depth_.publish(depth_msg); +} + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::ConvertMetricNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/crop_foremost.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/crop_foremost.cpp new file mode 100755 index 0000000..68ed8e2 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/crop_foremost.cpp @@ -0,0 +1,142 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +//#include +#include +#include +#include +#include +#include + +namespace depth_image_proc { + +namespace enc = sensor_msgs::image_encodings; + +class CropForemostNodelet : public nodelet::Nodelet +{ + // Subscriptions + boost::shared_ptr it_; + image_transport::Subscriber sub_raw_; + + // Publications + boost::mutex connect_mutex_; + image_transport::Publisher pub_depth_; + + virtual void onInit(); + + void connectCb(); + + void depthCb(const sensor_msgs::ImageConstPtr& raw_msg); + + double distance_; +}; + +void CropForemostNodelet::onInit() +{ + ros::NodeHandle& nh = getNodeHandle(); + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + private_nh.getParam("distance", distance_); + it_.reset(new image_transport::ImageTransport(nh)); + + // Monitor whether anyone is subscribed to the output + image_transport::SubscriberStatusCallback connect_cb = boost::bind(&CropForemostNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_depth_ + boost::lock_guard lock(connect_mutex_); + pub_depth_ = it_->advertise("image", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void CropForemostNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_depth_.getNumSubscribers() == 0) + { + sub_raw_.shutdown(); + } + else if (!sub_raw_) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_raw_ = it_->subscribe("image_raw", 1, &CropForemostNodelet::depthCb, this, hints); + } +} + +void CropForemostNodelet::depthCb(const sensor_msgs::ImageConstPtr& raw_msg) +{ + cv_bridge::CvImagePtr cv_ptr; + try + { + cv_ptr = cv_bridge::toCvCopy(raw_msg); + } + catch (cv_bridge::Exception& e) + { + ROS_ERROR("cv_bridge exception: %s", e.what()); + return; + } + + // Check the number of channels + if(sensor_msgs::image_encodings::numChannels(raw_msg->encoding) != 1){ + NODELET_ERROR_THROTTLE(2, "Only grayscale image is acceptable, got [%s]", raw_msg->encoding.c_str()); + return; + } + + // search the min value without invalid value "0" + double minVal; + cv::minMaxIdx(cv_ptr->image, &minVal, 0, 0, 0, cv_ptr->image != 0); + + int imtype = cv_bridge::getCvType(raw_msg->encoding); + switch (imtype){ + case CV_8UC1: + case CV_8SC1: + case CV_32F: + cv::threshold(cv_ptr->image, cv_ptr->image, minVal + distance_, 0, CV_THRESH_TOZERO_INV); + break; + case CV_16UC1: + case CV_16SC1: + case CV_32SC1: + case CV_64F: + // 8 bit or 32 bit floating array is required to use cv::threshold + cv_ptr->image.convertTo(cv_ptr->image, CV_32F); + cv::threshold(cv_ptr->image, cv_ptr->image, minVal + distance_, 1, CV_THRESH_TOZERO_INV); + + cv_ptr->image.convertTo(cv_ptr->image, imtype); + break; + } + + pub_depth_.publish(cv_ptr->toImageMsg()); +} + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::CropForemostNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/disparity.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/disparity.cpp new file mode 100644 index 0000000..dd82537 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/disparity.cpp @@ -0,0 +1,189 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. 
+* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#if ((BOOST_VERSION / 100) % 1000) >= 53 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace depth_image_proc { + +namespace enc = sensor_msgs::image_encodings; + +class DisparityNodelet : public nodelet::Nodelet +{ + boost::shared_ptr left_it_; + ros::NodeHandlePtr right_nh_; + image_transport::SubscriberFilter sub_depth_image_; + message_filters::Subscriber sub_info_; + typedef message_filters::TimeSynchronizer Sync; + boost::shared_ptr sync_; + + boost::mutex connect_mutex_; + ros::Publisher pub_disparity_; + double min_range_; + double max_range_; + double delta_d_; + + virtual void onInit(); + + void connectCb(); + + void depthCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg); + + template + void convert(const sensor_msgs::ImageConstPtr& depth_msg, + stereo_msgs::DisparityImagePtr& disp_msg); +}; + +void DisparityNodelet::onInit() +{ + ros::NodeHandle &nh = getNodeHandle(); + ros::NodeHandle &private_nh = getPrivateNodeHandle(); + ros::NodeHandle left_nh(nh, "left"); + left_it_.reset(new image_transport::ImageTransport(left_nh)); + right_nh_.reset( new ros::NodeHandle(nh, "right") ); + + // Read parameters + int queue_size; + private_nh.param("queue_size", queue_size, 5); + private_nh.param("min_range", min_range_, 0.0); + private_nh.param("max_range", max_range_, std::numeric_limits::infinity()); + private_nh.param("delta_d", delta_d_, 0.125); + + // Synchronize inputs. Topic subscriptions happen on demand in the connection callback. 
+ sync_.reset( new Sync(sub_depth_image_, sub_info_, queue_size) ); + sync_->registerCallback(boost::bind(&DisparityNodelet::depthCb, this, _1, _2)); + + // Monitor whether anyone is subscribed to the output + ros::SubscriberStatusCallback connect_cb = boost::bind(&DisparityNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_disparity_ + boost::lock_guard lock(connect_mutex_); + pub_disparity_ = left_nh.advertise("disparity", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void DisparityNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_disparity_.getNumSubscribers() == 0) + { + sub_depth_image_.unsubscribe(); + sub_info_ .unsubscribe(); + } + else if (!sub_depth_image_.getSubscriber()) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_depth_image_.subscribe(*left_it_, "image_rect", 1, hints); + sub_info_.subscribe(*right_nh_, "camera_info", 1); + } +} + +void DisparityNodelet::depthCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg) +{ + // Allocate new DisparityImage message + stereo_msgs::DisparityImagePtr disp_msg( new stereo_msgs::DisparityImage ); + disp_msg->header = depth_msg->header; + disp_msg->image.header = disp_msg->header; + disp_msg->image.encoding = enc::TYPE_32FC1; + disp_msg->image.height = depth_msg->height; + disp_msg->image.width = depth_msg->width; + disp_msg->image.step = disp_msg->image.width * sizeof (float); + disp_msg->image.data.resize( disp_msg->image.height * disp_msg->image.step, 0.0f ); + double fx = info_msg->P[0]; + disp_msg->T = -info_msg->P[3] / fx; + disp_msg->f = fx; + // Remaining fields depend on device characteristics, so rely on user input + disp_msg->min_disparity = disp_msg->f * disp_msg->T / max_range_; + disp_msg->max_disparity = disp_msg->f * disp_msg->T / min_range_; + disp_msg->delta_d = delta_d_; + + if (depth_msg->encoding == enc::TYPE_16UC1) + { + convert(depth_msg, disp_msg); + } + else if (depth_msg->encoding == enc::TYPE_32FC1) + { + convert(depth_msg, disp_msg); + } + else + { + NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); + return; + } + + pub_disparity_.publish(disp_msg); +} + +template +void DisparityNodelet::convert(const sensor_msgs::ImageConstPtr& depth_msg, + stereo_msgs::DisparityImagePtr& disp_msg) +{ + // For each depth Z, disparity d = fT / Z + float unit_scaling = DepthTraits::toMeters( T(1) ); + float constant = disp_msg->f * disp_msg->T / unit_scaling; + + const T* depth_row = reinterpret_cast(&depth_msg->data[0]); + int row_step = depth_msg->step / sizeof(T); + float* disp_data = reinterpret_cast(&disp_msg->image.data[0]); + for (int v = 0; v < (int)depth_msg->height; ++v) + { + for (int u = 0; u < (int)depth_msg->width; ++u) + { + T depth = depth_row[u]; + if (DepthTraits::valid(depth)) + *disp_data = constant / depth; + ++disp_data; + } + + depth_row += row_step; + } +} + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::DisparityNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyz.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyz.cpp new file mode 100644 index 0000000..084ae73 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyz.cpp @@ -0,0 +1,138 @@ 
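
The disparity nodelet above converts each depth sample Z into a disparity d = f * T / Z, where f is the focal length taken from P[0] and T = -P[3] / f is the stereo baseline, and it reports the representable disparity range from the user-supplied min/max depth. A vectorised numpy sketch of the same conversion follows; the focal length and baseline are placeholders, not values from a real camera_info message.

```
import numpy as np

def depth_to_disparity(depth_m, fx, baseline_m, min_range=0.5, max_range=5.0):
    """Convert a float depth image in metres to disparity in pixels: d = f * T / Z."""
    disparity = np.zeros_like(depth_m, dtype=np.float32)
    valid = np.isfinite(depth_m) & (depth_m > 0)
    disparity[valid] = (fx * baseline_m) / depth_m[valid]
    # The DisparityImage message also carries the representable range.
    min_disparity = fx * baseline_m / max_range
    max_disparity = fx * baseline_m / min_range
    return disparity, min_disparity, max_disparity

if __name__ == '__main__':
    fx, baseline = 570.3, 0.075          # placeholder focal length [px] and baseline [m]
    depth = np.array([[0.8, 1.6], [np.nan, 4.0]], dtype=np.float32)
    disp, dmin, dmax = depth_to_disparity(depth, fx, baseline)
    print(disp)
    print("disparity range: %.2f .. %.2f px" % (dmin, dmax))
```

For raw uint16 depth in millimetres the nodelet folds the mm-to-m conversion into the constant via DepthTraits; in this sketch you would divide the depth by 1000 first.
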
+/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace depth_image_proc { + +namespace enc = sensor_msgs::image_encodings; + +class PointCloudXyzNodelet : public nodelet::Nodelet +{ + // Subscriptions + boost::shared_ptr it_; + image_transport::CameraSubscriber sub_depth_; + int queue_size_; + + // Publications + boost::mutex connect_mutex_; + typedef sensor_msgs::PointCloud2 PointCloud; + ros::Publisher pub_point_cloud_; + + image_geometry::PinholeCameraModel model_; + + virtual void onInit(); + + void connectCb(); + + void depthCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg); +}; + +void PointCloudXyzNodelet::onInit() +{ + ros::NodeHandle& nh = getNodeHandle(); + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + it_.reset(new image_transport::ImageTransport(nh)); + + // Read parameters + private_nh.param("queue_size", queue_size_, 5); + + // Monitor whether anyone is subscribed to the output + ros::SubscriberStatusCallback connect_cb = boost::bind(&PointCloudXyzNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_point_cloud_ + boost::lock_guard lock(connect_mutex_); + pub_point_cloud_ = nh.advertise("points", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void PointCloudXyzNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_point_cloud_.getNumSubscribers() == 0) + { + sub_depth_.shutdown(); + } + else if (!sub_depth_) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_depth_ = it_->subscribeCamera("image_rect", queue_size_, &PointCloudXyzNodelet::depthCb, this, hints); + } +} + +void 
PointCloudXyzNodelet::depthCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg) +{ + PointCloud::Ptr cloud_msg(new PointCloud); + cloud_msg->header = depth_msg->header; + cloud_msg->height = depth_msg->height; + cloud_msg->width = depth_msg->width; + cloud_msg->is_dense = false; + cloud_msg->is_bigendian = false; + + sensor_msgs::PointCloud2Modifier pcd_modifier(*cloud_msg); + pcd_modifier.setPointCloud2FieldsByString(1, "xyz"); + + // Update camera model + model_.fromCameraInfo(info_msg); + + if (depth_msg->encoding == enc::TYPE_16UC1) + { + convert(depth_msg, cloud_msg, model_); + } + else if (depth_msg->encoding == enc::TYPE_32FC1) + { + convert(depth_msg, cloud_msg, model_); + } + else + { + NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); + return; + } + + pub_point_cloud_.publish (cloud_msg); +} + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::PointCloudXyzNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyz_radial.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyz_radial.cpp new file mode 100644 index 0000000..847cbfd --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyz_radial.cpp @@ -0,0 +1,235 @@ +/********************************************************************* + * Software License Agreement (BSD License) + * + * Copyright (c) 2008, Willow Garage, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Willow Garage nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *********************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace depth_image_proc { + + namespace enc = sensor_msgs::image_encodings; + + class PointCloudXyzRadialNodelet : public nodelet::Nodelet + { + // Subscriptions + boost::shared_ptr it_; + image_transport::CameraSubscriber sub_depth_; + int queue_size_; + + // Publications + boost::mutex connect_mutex_; + typedef sensor_msgs::PointCloud2 PointCloud; + ros::Publisher pub_point_cloud_; + + + std::vector D_; + boost::array K_; + + int width_; + int height_; + + cv::Mat binned; + + virtual void onInit(); + + void connectCb(); + + void depthCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg); + + // Handles float or uint16 depths + template + void convert(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg); + }; + + cv::Mat initMatrix(cv::Mat cameraMatrix, cv::Mat distCoeffs, int width, int height, bool radial) + { + int i,j; + int totalsize = width*height; + cv::Mat pixelVectors(1,totalsize,CV_32FC3); + cv::Mat dst(1,totalsize,CV_32FC3); + + cv::Mat sensorPoints(cv::Size(height,width), CV_32FC2); + cv::Mat undistortedSensorPoints(1,totalsize, CV_32FC2); + + std::vector ch; + for(j = 0; j < height; j++) + { + for(i = 0; i < width; i++) + { + cv::Vec2f &p = sensorPoints.at(i,j); + p[0] = i; + p[1] = j; + } + } + + sensorPoints = sensorPoints.reshape(2,1); + + cv::undistortPoints(sensorPoints, undistortedSensorPoints, cameraMatrix, distCoeffs); + + ch.push_back(undistortedSensorPoints); + ch.push_back(cv::Mat::ones(1,totalsize,CV_32FC1)); + cv::merge(ch,pixelVectors); + + if(radial) + { + for(i = 0; i < totalsize; i++) + { + normalize(pixelVectors.at(i), + dst.at(i)); + } + pixelVectors = dst; + } + return pixelVectors.reshape(3,width); + } + + + void PointCloudXyzRadialNodelet::onInit() + { + ros::NodeHandle& nh = getNodeHandle(); + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + it_.reset(new image_transport::ImageTransport(nh)); + + // Read parameters + private_nh.param("queue_size", queue_size_, 5); + + // Monitor whether anyone is subscribed to the output + ros::SubscriberStatusCallback connect_cb = + boost::bind(&PointCloudXyzRadialNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_point_cloud_ + boost::lock_guard lock(connect_mutex_); + pub_point_cloud_ = nh.advertise("points", 1, connect_cb, connect_cb); + } + + // Handles (un)subscribing when clients (un)subscribe + void PointCloudXyzRadialNodelet::connectCb() + { + boost::lock_guard lock(connect_mutex_); + if (pub_point_cloud_.getNumSubscribers() == 0) + { + sub_depth_.shutdown(); + } + else if (!sub_depth_) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_depth_ = it_->subscribeCamera("image_raw", + queue_size_, + &PointCloudXyzRadialNodelet::depthCb, + this, hints); + } + } + + void PointCloudXyzRadialNodelet::depthCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg) + { + PointCloud::Ptr cloud_msg(new PointCloud); + cloud_msg->header = depth_msg->header; + cloud_msg->height = depth_msg->height; + cloud_msg->width = depth_msg->width; + cloud_msg->is_dense = false; + cloud_msg->is_bigendian = false; + + sensor_msgs::PointCloud2Modifier pcd_modifier(*cloud_msg); + pcd_modifier.setPointCloud2FieldsByString(1, "xyz"); + + if(info_msg->D != D_ || 
info_msg->K != K_ || width_ != info_msg->width || + height_ != info_msg->height) + { + D_ = info_msg->D; + K_ = info_msg->K; + width_ = info_msg->width; + height_ = info_msg->height; + binned = initMatrix(cv::Mat_(3, 3, &K_[0]),cv::Mat(D_),width_,height_,true); + } + + if (depth_msg->encoding == enc::TYPE_16UC1) + { + convert(depth_msg, cloud_msg); + } + else if (depth_msg->encoding == enc::TYPE_32FC1) + { + convert(depth_msg, cloud_msg); + } + else + { + NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); + return; + } + + pub_point_cloud_.publish (cloud_msg); + } + + template + void PointCloudXyzRadialNodelet::convert(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg) + { + // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y) + double unit_scaling = DepthTraits::toMeters( T(1) ); + float bad_point = std::numeric_limits::quiet_NaN(); + + sensor_msgs::PointCloud2Iterator iter_x(*cloud_msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(*cloud_msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(*cloud_msg, "z"); + const T* depth_row = reinterpret_cast(&depth_msg->data[0]); + int row_step = depth_msg->step / sizeof(T); + for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step) + { + for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z) + { + T depth = depth_row[u]; + + // Missing points denoted by NaNs + if (!DepthTraits::valid(depth)) + { + *iter_x = *iter_y = *iter_z = bad_point; + continue; + } + const cv::Vec3f &cvPoint = binned.at(u,v) * DepthTraits::toMeters(depth); + // Fill in XYZ + *iter_x = cvPoint(0); + *iter_y = cvPoint(1); + *iter_z = cvPoint(2); + } + } + } + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::PointCloudXyzRadialNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzi.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzi.cpp new file mode 100644 index 0000000..9260442 --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzi.cpp @@ -0,0 +1,316 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#if ((BOOST_VERSION / 100) % 1000) >= 53 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace depth_image_proc { + +using namespace message_filters::sync_policies; +namespace enc = sensor_msgs::image_encodings; + +class PointCloudXyziNodelet : public nodelet::Nodelet +{ + ros::NodeHandlePtr intensity_nh_; + boost::shared_ptr intensity_it_, depth_it_; + + // Subscriptions + image_transport::SubscriberFilter sub_depth_, sub_intensity_; + message_filters::Subscriber sub_info_; + typedef ApproximateTime SyncPolicy; + typedef message_filters::Synchronizer Synchronizer; + boost::shared_ptr sync_; + + // Publications + boost::mutex connect_mutex_; + typedef sensor_msgs::PointCloud2 PointCloud; + ros::Publisher pub_point_cloud_; + + image_geometry::PinholeCameraModel model_; + + virtual void onInit(); + + void connectCb(); + + void imageCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& intensity_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg); + + template + void convert(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& intensity_msg, + const PointCloud::Ptr& cloud_msg); +}; + +void PointCloudXyziNodelet::onInit() +{ + ros::NodeHandle& nh = getNodeHandle(); + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + intensity_nh_.reset( new ros::NodeHandle(nh, "intensity") ); + ros::NodeHandle depth_nh(nh, "depth"); + intensity_it_ .reset( new image_transport::ImageTransport(*intensity_nh_) ); + depth_it_.reset( new image_transport::ImageTransport(depth_nh) ); + + // Read parameters + int queue_size; + private_nh.param("queue_size", queue_size, 5); + + // Synchronize inputs. Topic subscriptions happen on demand in the connection callback. 
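+  // Worked example (assumed intrinsics, for illustration only): convert() below uses the
+  // standard pinhole back-projection
+  //   X = (u - cx) * Z / fx,  Y = (v - cy) * Z / fy,  Z = depth in meters,
+  // so with cx = 320, fx = 525 and a pixel at u = 400 reading Z = 2.0 m,
+  //   X = (400 - 320) * 2.0 / 525 ≈ 0.305 m.
+  // The intensity sample is copied through unchanged into the float "intensity" field.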
+ sync_.reset( new Synchronizer(SyncPolicy(queue_size), sub_depth_, sub_intensity_, sub_info_) ); + sync_->registerCallback(boost::bind(&PointCloudXyziNodelet::imageCb, this, _1, _2, _3)); + + // Monitor whether anyone is subscribed to the output + ros::SubscriberStatusCallback connect_cb = boost::bind(&PointCloudXyziNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_point_cloud_ + boost::lock_guard lock(connect_mutex_); + pub_point_cloud_ = depth_nh.advertise("points", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void PointCloudXyziNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_point_cloud_.getNumSubscribers() == 0) + { + sub_depth_.unsubscribe(); + sub_intensity_ .unsubscribe(); + sub_info_ .unsubscribe(); + } + else if (!sub_depth_.getSubscriber()) + { + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + // parameter for depth_image_transport hint + std::string depth_image_transport_param = "depth_image_transport"; + + // depth image can use different transport.(e.g. compressedDepth) + image_transport::TransportHints depth_hints("raw",ros::TransportHints(), private_nh, depth_image_transport_param); + sub_depth_.subscribe(*depth_it_, "image_rect", 1, depth_hints); + + // intensity uses normal ros transport hints. + image_transport::TransportHints hints("raw", ros::TransportHints(), private_nh); + sub_intensity_.subscribe(*intensity_it_, "image_rect", 1, hints); + sub_info_.subscribe(*intensity_nh_, "camera_info", 1); + } +} + +void PointCloudXyziNodelet::imageCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& intensity_msg_in, + const sensor_msgs::CameraInfoConstPtr& info_msg) +{ + // Check for bad inputs + if (depth_msg->header.frame_id != intensity_msg_in->header.frame_id) + { + NODELET_ERROR_THROTTLE(5, "Depth image frame id [%s] doesn't match image frame id [%s]", + depth_msg->header.frame_id.c_str(), intensity_msg_in->header.frame_id.c_str()); + return; + } + + // Update camera model + model_.fromCameraInfo(info_msg); + + // Check if the input image has to be resized + sensor_msgs::ImageConstPtr intensity_msg = intensity_msg_in; + if (depth_msg->width != intensity_msg->width || depth_msg->height != intensity_msg->height) + { + sensor_msgs::CameraInfo info_msg_tmp = *info_msg; + info_msg_tmp.width = depth_msg->width; + info_msg_tmp.height = depth_msg->height; + float ratio = float(depth_msg->width)/float(intensity_msg->width); + info_msg_tmp.K[0] *= ratio; + info_msg_tmp.K[2] *= ratio; + info_msg_tmp.K[4] *= ratio; + info_msg_tmp.K[5] *= ratio; + info_msg_tmp.P[0] *= ratio; + info_msg_tmp.P[2] *= ratio; + info_msg_tmp.P[5] *= ratio; + info_msg_tmp.P[6] *= ratio; + model_.fromCameraInfo(info_msg_tmp); + + cv_bridge::CvImageConstPtr cv_ptr; + try + { + cv_ptr = cv_bridge::toCvShare(intensity_msg, intensity_msg->encoding); + } + catch (cv_bridge::Exception& e) + { + ROS_ERROR("cv_bridge exception: %s", e.what()); + return; + } + cv_bridge::CvImage cv_rsz; + cv_rsz.header = cv_ptr->header; + cv_rsz.encoding = cv_ptr->encoding; + cv::resize(cv_ptr->image.rowRange(0,depth_msg->height/ratio), cv_rsz.image, cv::Size(depth_msg->width, depth_msg->height)); + if ((intensity_msg->encoding == enc::MONO8) || (intensity_msg->encoding == enc::MONO16)) + intensity_msg = cv_rsz.toImageMsg(); + else + intensity_msg = cv_bridge::toCvCopy(cv_rsz.toImageMsg(), enc::MONO8)->toImageMsg(); + + //NODELET_ERROR_THROTTLE(5, "Depth resolution 
(%ux%u) does not match resolution (%ux%u)", + // depth_msg->width, depth_msg->height, rgb_msg->width, rgb_msg->height); + //return; + } else + intensity_msg = intensity_msg_in; + + // Supported color encodings: MONO8, MONO16 + if (intensity_msg->encoding != enc::MONO8 || intensity_msg->encoding != enc::MONO16) + { + try + { + intensity_msg = cv_bridge::toCvCopy(intensity_msg, enc::MONO8)->toImageMsg(); + } + catch (cv_bridge::Exception& e) + { + NODELET_ERROR_THROTTLE(5, "Unsupported encoding [%s]: %s", intensity_msg->encoding.c_str(), e.what()); + return; + } + } + + // Allocate new point cloud message + PointCloud::Ptr cloud_msg (new PointCloud); + cloud_msg->header = depth_msg->header; // Use depth image time stamp + cloud_msg->height = depth_msg->height; + cloud_msg->width = depth_msg->width; + cloud_msg->is_dense = false; + cloud_msg->is_bigendian = false; + + sensor_msgs::PointCloud2Modifier pcd_modifier(*cloud_msg); +// pcd_modifier.setPointCloud2FieldsByString(2, "xyz", "i"); + pcd_modifier.setPointCloud2Fields(4, + "x", 1, sensor_msgs::PointField::FLOAT32, + "y", 1, sensor_msgs::PointField::FLOAT32, + "z", 1, sensor_msgs::PointField::FLOAT32, + "intensity", 1, sensor_msgs::PointField::FLOAT32); + + + if (depth_msg->encoding == enc::TYPE_16UC1 && + intensity_msg->encoding == enc::MONO8) + { + convert(depth_msg, intensity_msg, cloud_msg); + } + else if (depth_msg->encoding == enc::TYPE_16UC1 && + intensity_msg->encoding == enc::MONO16) + { + convert(depth_msg, intensity_msg, cloud_msg); + } + else if (depth_msg->encoding == enc::TYPE_32FC1 && + intensity_msg->encoding == enc::MONO8) + { + convert(depth_msg, intensity_msg, cloud_msg); + } + else if (depth_msg->encoding == enc::TYPE_32FC1 && + intensity_msg->encoding == enc::MONO16) + { + convert(depth_msg, intensity_msg, cloud_msg); + } + else + { + NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); + return; + } + + pub_point_cloud_.publish (cloud_msg); +} + +template +void PointCloudXyziNodelet::convert(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& intensity_msg, + const PointCloud::Ptr& cloud_msg) +{ + // Use correct principal point from calibration + float center_x = model_.cx(); + float center_y = model_.cy(); + + // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y) + double unit_scaling = DepthTraits::toMeters( T(1) ); + float constant_x = unit_scaling / model_.fx(); + float constant_y = unit_scaling / model_.fy(); + float bad_point = std::numeric_limits::quiet_NaN (); + + const T* depth_row = reinterpret_cast(&depth_msg->data[0]); + int row_step = depth_msg->step / sizeof(T); + + const T2* inten_row = reinterpret_cast(&intensity_msg->data[0]); + int inten_row_step = intensity_msg->step / sizeof(T2); + + sensor_msgs::PointCloud2Iterator iter_x(*cloud_msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(*cloud_msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(*cloud_msg, "z"); + sensor_msgs::PointCloud2Iterator iter_i(*cloud_msg, "intensity"); + + for (int v = 0; v < int(cloud_msg->height); ++v, depth_row += row_step, inten_row += inten_row_step) + { + for (int u = 0; u < int(cloud_msg->width); ++u, ++iter_x, ++iter_y, ++iter_z, ++iter_i) + { + T depth = depth_row[u]; + T2 inten = inten_row[u]; + // Check for invalid measurements + if (!DepthTraits::valid(depth)) + { + *iter_x = *iter_y = *iter_z = bad_point; + } + else + { + // Fill in XYZ + *iter_x = (u - center_x) * depth * constant_x; + *iter_y = (v - 
center_y) * depth * constant_y; + *iter_z = DepthTraits::toMeters(depth); + } + + // Fill in intensity + *iter_i = inten; + } + } +} + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::PointCloudXyziNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzi_radial.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzi_radial.cpp new file mode 100644 index 0000000..1d9294d --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzi_radial.cpp @@ -0,0 +1,313 @@ +/********************************************************************* + * Software License Agreement (BSD License) + * + * Copyright (c) 2008, Willow Garage, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Willow Garage nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *********************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace depth_image_proc { + + using namespace message_filters::sync_policies; + namespace enc = sensor_msgs::image_encodings; + typedef ExactTime SyncPolicy; + + class PointCloudXyziRadialNodelet : public nodelet::Nodelet + { + ros::NodeHandlePtr intensity_nh_; + + // Subscriptions + boost::shared_ptr intensity_it_, depth_it_; + image_transport::SubscriberFilter sub_depth_, sub_intensity_; + message_filters::Subscriber sub_info_; + + int queue_size_; + + // Publications + boost::mutex connect_mutex_; + typedef sensor_msgs::PointCloud2 PointCloud; + ros::Publisher pub_point_cloud_; + + + typedef message_filters::Synchronizer Synchronizer; + boost::shared_ptr sync_; + + std::vector D_; + boost::array K_; + + int width_; + int height_; + + cv::Mat transform_; + + virtual void onInit(); + + void connectCb(); + + void imageCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& intensity_msg_in, + const sensor_msgs::CameraInfoConstPtr& info_msg); + + // Handles float or uint16 depths + template + void convert_depth(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg); + + template + void convert_intensity(const sensor_msgs::ImageConstPtr &inten_msg, PointCloud::Ptr& cloud_msg); + + cv::Mat initMatrix(cv::Mat cameraMatrix, cv::Mat distCoeffs, int width, int height, bool radial); + + }; + + cv::Mat PointCloudXyziRadialNodelet::initMatrix(cv::Mat cameraMatrix, cv::Mat distCoeffs, int width, int height, bool radial) + { + int i,j; + int totalsize = width*height; + cv::Mat pixelVectors(1,totalsize,CV_32FC3); + cv::Mat dst(1,totalsize,CV_32FC3); + + cv::Mat sensorPoints(cv::Size(height,width), CV_32FC2); + cv::Mat undistortedSensorPoints(1,totalsize, CV_32FC2); + + std::vector ch; + for(j = 0; j < height; j++) + { + for(i = 0; i < width; i++) + { + cv::Vec2f &p = sensorPoints.at(i,j); + p[0] = i; + p[1] = j; + } + } + + sensorPoints = sensorPoints.reshape(2,1); + + cv::undistortPoints(sensorPoints, undistortedSensorPoints, cameraMatrix, distCoeffs); + + ch.push_back(undistortedSensorPoints); + ch.push_back(cv::Mat::ones(1,totalsize,CV_32FC1)); + cv::merge(ch,pixelVectors); + + if(radial) + { + for(i = 0; i < totalsize; i++) + { + normalize(pixelVectors.at(i), + dst.at(i)); + } + pixelVectors = dst; + } + return pixelVectors.reshape(3,width); + } + + + void PointCloudXyziRadialNodelet::onInit() + { + ros::NodeHandle& nh = getNodeHandle(); + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + + intensity_nh_.reset( new ros::NodeHandle(nh, "intensity") ); + ros::NodeHandle depth_nh(nh, "depth"); + intensity_it_ .reset( new image_transport::ImageTransport(*intensity_nh_) ); + depth_it_.reset( new image_transport::ImageTransport(depth_nh) ); + + // Read parameters + private_nh.param("queue_size", queue_size_, 5); + + // Synchronize inputs. Topic subscriptions happen on demand in the connection callback. 
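+  // Illustrative note (ray values assumed): initMatrix() caches one normalized viewing ray
+  // per pixel (undistort, append 1, normalize), and convert_depth() scales that ray by the
+  // range reading, so depth here is distance along the ray rather than a plain z value.
+  // E.g. a unit ray (0.10, -0.20, 0.9747) with a 2.0 m reading yields the point
+  //   (0.20, -0.40, 1.949), not z = 2.0.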
+ sync_.reset( new Synchronizer(SyncPolicy(queue_size_), sub_depth_, sub_intensity_, sub_info_) ); + sync_->registerCallback(boost::bind(&PointCloudXyziRadialNodelet::imageCb, this, _1, _2, _3)); + + // Monitor whether anyone is subscribed to the output + ros::SubscriberStatusCallback connect_cb = + boost::bind(&PointCloudXyziRadialNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_point_cloud_ + boost::lock_guard lock(connect_mutex_); + pub_point_cloud_ = nh.advertise("points", 20, connect_cb, connect_cb); + } + + // Handles (un)subscribing when clients (un)subscribe + void PointCloudXyziRadialNodelet::connectCb() + { + boost::lock_guard lock(connect_mutex_); + + if (pub_point_cloud_.getNumSubscribers() == 0) + { + sub_depth_.unsubscribe(); + sub_intensity_.unsubscribe(); + sub_info_.unsubscribe(); + } + else if (!sub_depth_.getSubscriber()) + { + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + // parameter for depth_image_transport hint + std::string depth_image_transport_param = "depth_image_transport"; + + // depth image can use different transport.(e.g. compressedDepth) + image_transport::TransportHints depth_hints("raw",ros::TransportHints(), private_nh, depth_image_transport_param); + sub_depth_.subscribe(*depth_it_, "image_raw", 5, depth_hints); + + // intensity uses normal ros transport hints. + image_transport::TransportHints hints("raw", ros::TransportHints(), private_nh); + sub_intensity_.subscribe(*intensity_it_, "image_raw", 5, hints); + sub_info_.subscribe(*intensity_nh_, "camera_info", 5); + } + } + + void PointCloudXyziRadialNodelet::imageCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& intensity_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg) + { + PointCloud::Ptr cloud_msg(new PointCloud); + cloud_msg->header = depth_msg->header; + cloud_msg->height = depth_msg->height; + cloud_msg->width = depth_msg->width; + cloud_msg->is_dense = false; + cloud_msg->is_bigendian = false; + + sensor_msgs::PointCloud2Modifier pcd_modifier(*cloud_msg); + pcd_modifier.setPointCloud2Fields(4, + "x", 1, sensor_msgs::PointField::FLOAT32, + "y", 1, sensor_msgs::PointField::FLOAT32, + "z", 1, sensor_msgs::PointField::FLOAT32, + "intensity", 1, sensor_msgs::PointField::FLOAT32); + + + if(info_msg->D != D_ || info_msg->K != K_ || width_ != info_msg->width || + height_ != info_msg->height) + { + D_ = info_msg->D; + K_ = info_msg->K; + width_ = info_msg->width; + height_ = info_msg->height; + transform_ = initMatrix(cv::Mat_(3, 3, &K_[0]),cv::Mat(D_),width_,height_,true); + } + + if (depth_msg->encoding == enc::TYPE_16UC1) + { + convert_depth(depth_msg, cloud_msg); + } + else if (depth_msg->encoding == enc::TYPE_32FC1) + { + convert_depth(depth_msg, cloud_msg); + } + else + { + NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); + return; + } + + if(intensity_msg->encoding == enc::TYPE_16UC1) + { + convert_intensity(intensity_msg, cloud_msg); + + } + else if(intensity_msg->encoding == enc::MONO8) + { + convert_intensity(intensity_msg, cloud_msg); + } + else + { + NODELET_ERROR_THROTTLE(5, "Intensity image has unsupported encoding [%s]", intensity_msg->encoding.c_str()); + return; + } + + pub_point_cloud_.publish (cloud_msg); + } + + template + void PointCloudXyziRadialNodelet::convert_depth(const sensor_msgs::ImageConstPtr& depth_msg, + PointCloud::Ptr& cloud_msg) + { + // Combine unit conversion (if necessary) with scaling by focal length 
for computing (X,Y) + double unit_scaling = DepthTraits::toMeters( T(1) ); + float bad_point = std::numeric_limits::quiet_NaN(); + + sensor_msgs::PointCloud2Iterator iter_x(*cloud_msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(*cloud_msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(*cloud_msg, "z"); + const T* depth_row = reinterpret_cast(&depth_msg->data[0]); + + int row_step = depth_msg->step / sizeof(T); + for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step) + { + for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z) + { + T depth = depth_row[u]; + + // Missing points denoted by NaNs + if (!DepthTraits::valid(depth)) + { + *iter_x = *iter_y = *iter_z = bad_point; + continue; + } + const cv::Vec3f &cvPoint = transform_.at(u,v) * DepthTraits::toMeters(depth); + // Fill in XYZ + *iter_x = cvPoint(0); + *iter_y = cvPoint(1); + *iter_z = cvPoint(2); + } + } + } + + template + void PointCloudXyziRadialNodelet::convert_intensity(const sensor_msgs::ImageConstPtr& intensity_msg, + PointCloud::Ptr& cloud_msg) + { + sensor_msgs::PointCloud2Iterator iter_i(*cloud_msg, "intensity"); + const T* inten_row = reinterpret_cast(&intensity_msg->data[0]); + + const int i_row_step = intensity_msg->step/sizeof(T); + for (int v = 0; v < (int)cloud_msg->height; ++v, inten_row += i_row_step) + { + for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_i) + { + *iter_i = inten_row[u]; + } + } + } + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::PointCloudXyziRadialNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzrgb.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzrgb.cpp new file mode 100644 index 0000000..bd680aa --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/point_cloud_xyzrgb.cpp @@ -0,0 +1,345 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#if ((BOOST_VERSION / 100) % 1000) >= 53 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace depth_image_proc { + +using namespace message_filters::sync_policies; +namespace enc = sensor_msgs::image_encodings; + +class PointCloudXyzrgbNodelet : public nodelet::Nodelet +{ + ros::NodeHandlePtr rgb_nh_; + boost::shared_ptr rgb_it_, depth_it_; + + // Subscriptions + image_transport::SubscriberFilter sub_depth_, sub_rgb_; + message_filters::Subscriber sub_info_; + typedef ApproximateTime SyncPolicy; + typedef ExactTime ExactSyncPolicy; + typedef message_filters::Synchronizer Synchronizer; + typedef message_filters::Synchronizer ExactSynchronizer; + boost::shared_ptr sync_; + boost::shared_ptr exact_sync_; + + // Publications + boost::mutex connect_mutex_; + typedef sensor_msgs::PointCloud2 PointCloud; + ros::Publisher pub_point_cloud_; + + image_geometry::PinholeCameraModel model_; + + virtual void onInit(); + + void connectCb(); + + void imageCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& rgb_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg); + + template + void convert(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& rgb_msg, + const PointCloud::Ptr& cloud_msg, + int red_offset, int green_offset, int blue_offset, int color_step); +}; + +void PointCloudXyzrgbNodelet::onInit() +{ + ros::NodeHandle& nh = getNodeHandle(); + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + rgb_nh_.reset( new ros::NodeHandle(nh, "rgb") ); + ros::NodeHandle depth_nh(nh, "depth_registered"); + rgb_it_ .reset( new image_transport::ImageTransport(*rgb_nh_) ); + depth_it_.reset( new image_transport::ImageTransport(depth_nh) ); + + // Read parameters + int queue_size; + private_nh.param("queue_size", queue_size, 5); + bool use_exact_sync; + private_nh.param("exact_sync", use_exact_sync, false); + + // Synchronize inputs. Topic subscriptions happen on demand in the connection callback. 
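+  // Illustrative note (resolutions assumed): when the depth and RGB images differ in size,
+  // imageCb() below rescales the RGB camera model before resizing the color image, e.g.
+  // 320x240 depth with 640x480 RGB gives ratio = 320/640 = 0.5, so fx, fy, cx, cy
+  // (K[0], K[4], K[2], K[5]) and the matching P entries are halved to stay consistent
+  // with the depth resolution.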
+ if (use_exact_sync) + { + exact_sync_.reset( new ExactSynchronizer(ExactSyncPolicy(queue_size), sub_depth_, sub_rgb_, sub_info_) ); + exact_sync_->registerCallback(boost::bind(&PointCloudXyzrgbNodelet::imageCb, this, _1, _2, _3)); + } + else + { + sync_.reset( new Synchronizer(SyncPolicy(queue_size), sub_depth_, sub_rgb_, sub_info_) ); + sync_->registerCallback(boost::bind(&PointCloudXyzrgbNodelet::imageCb, this, _1, _2, _3)); + } + + // Monitor whether anyone is subscribed to the output + ros::SubscriberStatusCallback connect_cb = boost::bind(&PointCloudXyzrgbNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_point_cloud_ + boost::lock_guard lock(connect_mutex_); + pub_point_cloud_ = depth_nh.advertise("points", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void PointCloudXyzrgbNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_point_cloud_.getNumSubscribers() == 0) + { + sub_depth_.unsubscribe(); + sub_rgb_ .unsubscribe(); + sub_info_ .unsubscribe(); + } + else if (!sub_depth_.getSubscriber()) + { + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + // parameter for depth_image_transport hint + std::string depth_image_transport_param = "depth_image_transport"; + + // depth image can use different transport.(e.g. compressedDepth) + image_transport::TransportHints depth_hints("raw",ros::TransportHints(), private_nh, depth_image_transport_param); + sub_depth_.subscribe(*depth_it_, "image_rect", 1, depth_hints); + + // rgb uses normal ros transport hints. + image_transport::TransportHints hints("raw", ros::TransportHints(), private_nh); + sub_rgb_ .subscribe(*rgb_it_, "image_rect_color", 1, hints); + sub_info_ .subscribe(*rgb_nh_, "camera_info", 1); + } +} + +void PointCloudXyzrgbNodelet::imageCb(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& rgb_msg_in, + const sensor_msgs::CameraInfoConstPtr& info_msg) +{ + // Check for bad inputs + if (depth_msg->header.frame_id != rgb_msg_in->header.frame_id) + { + NODELET_ERROR_THROTTLE(5, "Depth image frame id [%s] doesn't match RGB image frame id [%s]", + depth_msg->header.frame_id.c_str(), rgb_msg_in->header.frame_id.c_str()); + return; + } + + // Update camera model + model_.fromCameraInfo(info_msg); + + // Check if the input image has to be resized + sensor_msgs::ImageConstPtr rgb_msg = rgb_msg_in; + if (depth_msg->width != rgb_msg->width || depth_msg->height != rgb_msg->height) + { + sensor_msgs::CameraInfo info_msg_tmp = *info_msg; + info_msg_tmp.width = depth_msg->width; + info_msg_tmp.height = depth_msg->height; + float ratio = float(depth_msg->width)/float(rgb_msg->width); + info_msg_tmp.K[0] *= ratio; + info_msg_tmp.K[2] *= ratio; + info_msg_tmp.K[4] *= ratio; + info_msg_tmp.K[5] *= ratio; + info_msg_tmp.P[0] *= ratio; + info_msg_tmp.P[2] *= ratio; + info_msg_tmp.P[5] *= ratio; + info_msg_tmp.P[6] *= ratio; + model_.fromCameraInfo(info_msg_tmp); + + cv_bridge::CvImageConstPtr cv_ptr; + try + { + cv_ptr = cv_bridge::toCvShare(rgb_msg, rgb_msg->encoding); + } + catch (cv_bridge::Exception& e) + { + ROS_ERROR("cv_bridge exception: %s", e.what()); + return; + } + cv_bridge::CvImage cv_rsz; + cv_rsz.header = cv_ptr->header; + cv_rsz.encoding = cv_ptr->encoding; + cv::resize(cv_ptr->image.rowRange(0,depth_msg->height/ratio), cv_rsz.image, cv::Size(depth_msg->width, depth_msg->height)); + if ((rgb_msg->encoding == enc::RGB8) || (rgb_msg->encoding == enc::BGR8) || (rgb_msg->encoding == 
enc::MONO8)) + rgb_msg = cv_rsz.toImageMsg(); + else + rgb_msg = cv_bridge::toCvCopy(cv_rsz.toImageMsg(), enc::RGB8)->toImageMsg(); + + //NODELET_ERROR_THROTTLE(5, "Depth resolution (%ux%u) does not match RGB resolution (%ux%u)", + // depth_msg->width, depth_msg->height, rgb_msg->width, rgb_msg->height); + //return; + } else + rgb_msg = rgb_msg_in; + + // Supported color encodings: RGB8, BGR8, MONO8 + int red_offset, green_offset, blue_offset, color_step; + if (rgb_msg->encoding == enc::RGB8) + { + red_offset = 0; + green_offset = 1; + blue_offset = 2; + color_step = 3; + } + else if (rgb_msg->encoding == enc::BGR8) + { + red_offset = 2; + green_offset = 1; + blue_offset = 0; + color_step = 3; + } + else if (rgb_msg->encoding == enc::MONO8) + { + red_offset = 0; + green_offset = 0; + blue_offset = 0; + color_step = 1; + } + else + { + try + { + rgb_msg = cv_bridge::toCvCopy(rgb_msg, enc::RGB8)->toImageMsg(); + } + catch (cv_bridge::Exception& e) + { + NODELET_ERROR_THROTTLE(5, "Unsupported encoding [%s]: %s", rgb_msg->encoding.c_str(), e.what()); + return; + } + red_offset = 0; + green_offset = 1; + blue_offset = 2; + color_step = 3; + } + + // Allocate new point cloud message + PointCloud::Ptr cloud_msg (new PointCloud); + cloud_msg->header = depth_msg->header; // Use depth image time stamp + cloud_msg->height = depth_msg->height; + cloud_msg->width = depth_msg->width; + cloud_msg->is_dense = false; + cloud_msg->is_bigendian = false; + + sensor_msgs::PointCloud2Modifier pcd_modifier(*cloud_msg); + pcd_modifier.setPointCloud2FieldsByString(2, "xyz", "rgb"); + + if (depth_msg->encoding == enc::TYPE_16UC1) + { + convert(depth_msg, rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + } + else if (depth_msg->encoding == enc::TYPE_32FC1) + { + convert(depth_msg, rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + } + else + { + NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); + return; + } + + pub_point_cloud_.publish (cloud_msg); +} + +template +void PointCloudXyzrgbNodelet::convert(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImageConstPtr& rgb_msg, + const PointCloud::Ptr& cloud_msg, + int red_offset, int green_offset, int blue_offset, int color_step) +{ + // Use correct principal point from calibration + float center_x = model_.cx(); + float center_y = model_.cy(); + + // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y) + double unit_scaling = DepthTraits::toMeters( T(1) ); + float constant_x = unit_scaling / model_.fx(); + float constant_y = unit_scaling / model_.fy(); + float bad_point = std::numeric_limits::quiet_NaN (); + + const T* depth_row = reinterpret_cast(&depth_msg->data[0]); + int row_step = depth_msg->step / sizeof(T); + const uint8_t* rgb = &rgb_msg->data[0]; + int rgb_skip = rgb_msg->step - rgb_msg->width * color_step; + + sensor_msgs::PointCloud2Iterator iter_x(*cloud_msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(*cloud_msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(*cloud_msg, "z"); + sensor_msgs::PointCloud2Iterator iter_r(*cloud_msg, "r"); + sensor_msgs::PointCloud2Iterator iter_g(*cloud_msg, "g"); + sensor_msgs::PointCloud2Iterator iter_b(*cloud_msg, "b"); + sensor_msgs::PointCloud2Iterator iter_a(*cloud_msg, "a"); + + for (int v = 0; v < int(cloud_msg->height); ++v, depth_row += row_step, rgb += rgb_skip) + { + for (int u = 0; u < int(cloud_msg->width); ++u, rgb += color_step, ++iter_x, ++iter_y, 
++iter_z, ++iter_a, ++iter_r, ++iter_g, ++iter_b) + { + T depth = depth_row[u]; + + // Check for invalid measurements + if (!DepthTraits::valid(depth)) + { + *iter_x = *iter_y = *iter_z = bad_point; + } + else + { + // Fill in XYZ + *iter_x = (u - center_x) * depth * constant_x; + *iter_y = (v - center_y) * depth * constant_y; + *iter_z = DepthTraits::toMeters(depth); + } + + // Fill in color + *iter_a = 255; + *iter_r = rgb[red_offset]; + *iter_g = rgb[green_offset]; + *iter_b = rgb[blue_offset]; + } + } +} + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::PointCloudXyzrgbNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/depth_image_proc/src/nodelets/register.cpp b/future pkg/image_pipeline/depth_image_proc/src/nodelets/register.cpp new file mode 100644 index 0000000..980b3cb --- /dev/null +++ b/future pkg/image_pipeline/depth_image_proc/src/nodelets/register.cpp @@ -0,0 +1,263 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace depth_image_proc { + +using namespace message_filters::sync_policies; +namespace enc = sensor_msgs::image_encodings; + +class RegisterNodelet : public nodelet::Nodelet +{ + ros::NodeHandlePtr nh_depth_, nh_rgb_; + boost::shared_ptr it_depth_; + + // Subscriptions + image_transport::SubscriberFilter sub_depth_image_; + message_filters::Subscriber sub_depth_info_, sub_rgb_info_; + boost::shared_ptr tf_buffer_; + boost::shared_ptr tf_; + typedef ApproximateTime SyncPolicy; + typedef message_filters::Synchronizer Synchronizer; + boost::shared_ptr sync_; + + // Publications + boost::mutex connect_mutex_; + image_transport::CameraPublisher pub_registered_; + + image_geometry::PinholeCameraModel depth_model_, rgb_model_; + + virtual void onInit(); + + void connectCb(); + + void imageCb(const sensor_msgs::ImageConstPtr& depth_image_msg, + const sensor_msgs::CameraInfoConstPtr& depth_info_msg, + const sensor_msgs::CameraInfoConstPtr& rgb_info_msg); + + template + void convert(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImagePtr& registered_msg, + const Eigen::Affine3d& depth_to_rgb); +}; + +void RegisterNodelet::onInit() +{ + ros::NodeHandle& nh = getNodeHandle(); + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + nh_depth_.reset( new ros::NodeHandle(nh, "depth") ); + nh_rgb_.reset( new ros::NodeHandle(nh, "rgb") ); + it_depth_.reset( new image_transport::ImageTransport(*nh_depth_) ); + tf_buffer_.reset( new tf2_ros::Buffer ); + tf_.reset( new tf2_ros::TransformListener(*tf_buffer_) ); + + // Read parameters + int queue_size; + private_nh.param("queue_size", queue_size, 5); + + // Synchronize inputs. Topic subscriptions happen on demand in the connection callback. 
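+  // Worked example (transform and intrinsics assumed): convert() below back-projects each
+  // depth pixel to (X,Y,Z,1), applies the tf2 transform depth_to_rgb, and re-projects with
+  // the RGB intrinsics, keeping the nearest value when two depth pixels map to the same
+  // RGB pixel (the Z-buffer check). For a pure 0.025 m x-offset between the cameras and
+  // rgb_fx = 525, a point at Z = 1.0 m shifts by about 525 * 0.025 / 1.0 ≈ 13 px in u.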
+ sync_.reset( new Synchronizer(SyncPolicy(queue_size), sub_depth_image_, sub_depth_info_, sub_rgb_info_) ); + sync_->registerCallback(boost::bind(&RegisterNodelet::imageCb, this, _1, _2, _3)); + + // Monitor whether anyone is subscribed to the output + image_transport::ImageTransport it_depth_reg(ros::NodeHandle(nh, "depth_registered")); + image_transport::SubscriberStatusCallback image_connect_cb = boost::bind(&RegisterNodelet::connectCb, this); + ros::SubscriberStatusCallback info_connect_cb = boost::bind(&RegisterNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_registered_ + boost::lock_guard lock(connect_mutex_); + pub_registered_ = it_depth_reg.advertiseCamera("image_rect", 1, + image_connect_cb, image_connect_cb, + info_connect_cb, info_connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void RegisterNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_registered_.getNumSubscribers() == 0) + { + sub_depth_image_.unsubscribe(); + sub_depth_info_ .unsubscribe(); + sub_rgb_info_ .unsubscribe(); + } + else if (!sub_depth_image_.getSubscriber()) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_depth_image_.subscribe(*it_depth_, "image_rect", 1, hints); + sub_depth_info_ .subscribe(*nh_depth_, "camera_info", 1); + sub_rgb_info_ .subscribe(*nh_rgb_, "camera_info", 1); + } +} + +void RegisterNodelet::imageCb(const sensor_msgs::ImageConstPtr& depth_image_msg, + const sensor_msgs::CameraInfoConstPtr& depth_info_msg, + const sensor_msgs::CameraInfoConstPtr& rgb_info_msg) +{ + // Update camera models - these take binning & ROI into account + depth_model_.fromCameraInfo(depth_info_msg); + rgb_model_ .fromCameraInfo(rgb_info_msg); + + // Query tf2 for transform from (X,Y,Z) in depth camera frame to RGB camera frame + Eigen::Affine3d depth_to_rgb; + try + { + geometry_msgs::TransformStamped transform = tf_buffer_->lookupTransform ( + rgb_info_msg->header.frame_id, depth_info_msg->header.frame_id, + depth_info_msg->header.stamp); + + tf::transformMsgToEigen(transform.transform, depth_to_rgb); + } + catch (tf2::TransformException& ex) + { + NODELET_WARN_THROTTLE(2, "TF2 exception:\n%s", ex.what()); + return; + /// @todo Can take on order of a minute to register a disconnect callback when we + /// don't call publish() in this cb. What's going on roscpp? 
+ } + + // Allocate registered depth image + sensor_msgs::ImagePtr registered_msg( new sensor_msgs::Image ); + registered_msg->header.stamp = depth_image_msg->header.stamp; + registered_msg->header.frame_id = rgb_info_msg->header.frame_id; + registered_msg->encoding = depth_image_msg->encoding; + + cv::Size resolution = rgb_model_.reducedResolution(); + registered_msg->height = resolution.height; + registered_msg->width = resolution.width; + // step and data set in convert(), depend on depth data type + + if (depth_image_msg->encoding == enc::TYPE_16UC1) + { + convert(depth_image_msg, registered_msg, depth_to_rgb); + } + else if (depth_image_msg->encoding == enc::TYPE_32FC1) + { + convert(depth_image_msg, registered_msg, depth_to_rgb); + } + else + { + NODELET_ERROR_THROTTLE(5, "Depth image has unsupported encoding [%s]", depth_image_msg->encoding.c_str()); + return; + } + + // Registered camera info is the same as the RGB info, but uses the depth timestamp + sensor_msgs::CameraInfoPtr registered_info_msg( new sensor_msgs::CameraInfo(*rgb_info_msg) ); + registered_info_msg->header.stamp = registered_msg->header.stamp; + + pub_registered_.publish(registered_msg, registered_info_msg); +} + +template +void RegisterNodelet::convert(const sensor_msgs::ImageConstPtr& depth_msg, + const sensor_msgs::ImagePtr& registered_msg, + const Eigen::Affine3d& depth_to_rgb) +{ + // Allocate memory for registered depth image + registered_msg->step = registered_msg->width * sizeof(T); + registered_msg->data.resize( registered_msg->height * registered_msg->step ); + // data is already zero-filled in the uint16 case, but for floats we want to initialize everything to NaN. + DepthTraits::initializeBuffer(registered_msg->data); + + // Extract all the parameters we need + double inv_depth_fx = 1.0 / depth_model_.fx(); + double inv_depth_fy = 1.0 / depth_model_.fy(); + double depth_cx = depth_model_.cx(), depth_cy = depth_model_.cy(); + double depth_Tx = depth_model_.Tx(), depth_Ty = depth_model_.Ty(); + double rgb_fx = rgb_model_.fx(), rgb_fy = rgb_model_.fy(); + double rgb_cx = rgb_model_.cx(), rgb_cy = rgb_model_.cy(); + double rgb_Tx = rgb_model_.Tx(), rgb_Ty = rgb_model_.Ty(); + + // Transform the depth values into the RGB frame + /// @todo When RGB is higher res, interpolate by rasterizing depth triangles onto the registered image + const T* depth_row = reinterpret_cast(&depth_msg->data[0]); + int row_step = depth_msg->step / sizeof(T); + T* registered_data = reinterpret_cast(®istered_msg->data[0]); + int raw_index = 0; + for (unsigned v = 0; v < depth_msg->height; ++v, depth_row += row_step) + { + for (unsigned u = 0; u < depth_msg->width; ++u, ++raw_index) + { + T raw_depth = depth_row[u]; + if (!DepthTraits::valid(raw_depth)) + continue; + + double depth = DepthTraits::toMeters(raw_depth); + + /// @todo Combine all operations into one matrix multiply on (u,v,d) + // Reproject (u,v,Z) to (X,Y,Z,1) in depth camera frame + Eigen::Vector4d xyz_depth; + xyz_depth << ((u - depth_cx)*depth - depth_Tx) * inv_depth_fx, + ((v - depth_cy)*depth - depth_Ty) * inv_depth_fy, + depth, + 1; + + // Transform to RGB camera frame + Eigen::Vector4d xyz_rgb = depth_to_rgb * xyz_depth; + + // Project to (u,v) in RGB image + double inv_Z = 1.0 / xyz_rgb.z(); + int u_rgb = (rgb_fx*xyz_rgb.x() + rgb_Tx)*inv_Z + rgb_cx + 0.5; + int v_rgb = (rgb_fy*xyz_rgb.y() + rgb_Ty)*inv_Z + rgb_cy + 0.5; + + if (u_rgb < 0 || u_rgb >= (int)registered_msg->width || + v_rgb < 0 || v_rgb >= (int)registered_msg->height) + continue; + + T& reg_depth = 
registered_data[v_rgb*registered_msg->width + u_rgb]; + T new_depth = DepthTraits::fromMeters(xyz_rgb.z()); + // Validity and Z-buffer checks + if (!DepthTraits::valid(reg_depth) || reg_depth > new_depth) + reg_depth = new_depth; + } + } +} + +} // namespace depth_image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(depth_image_proc::RegisterNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/image_pipeline/.tar b/future pkg/image_pipeline/image_pipeline/.tar new file mode 100644 index 0000000..655c895 --- /dev/null +++ b/future pkg/image_pipeline/image_pipeline/.tar @@ -0,0 +1,2 @@ +{!!python/unicode 'url': 'https://github.com/ros-gbp/image_pipeline-release/archive/release/kinetic/image_pipeline/1.12.23-0.tar.gz', + !!python/unicode 'version': image_pipeline-release-release-kinetic-image_pipeline-1.12.23-0} diff --git a/future pkg/image_pipeline/image_pipeline/CHANGELOG.rst b/future pkg/image_pipeline/image_pipeline/CHANGELOG.rst new file mode 100644 index 0000000..8e0904f --- /dev/null +++ b/future pkg/image_pipeline/image_pipeline/CHANGELOG.rst @@ -0,0 +1,68 @@ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Changelog for package image_pipeline +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +1.12.23 (2018-05-10) +-------------------- + +1.12.22 (2017-12-08) +-------------------- + +1.12.21 (2017-11-05) +-------------------- + +1.12.20 (2017-04-30) +-------------------- +* Update package.xml (`#263 `_) +* Contributors: Kei Okada + +1.12.19 (2016-07-24) +-------------------- + +1.12.18 (2016-07-12) +-------------------- + +1.12.17 (2016-07-11) +-------------------- + +1.12.16 (2016-03-19) +-------------------- + +1.12.15 (2016-01-17) +-------------------- + +1.12.14 (2015-07-22) +-------------------- + +1.12.13 (2015-04-06) +-------------------- + +1.12.12 (2014-12-31) +-------------------- + +1.12.11 (2014-10-26) +-------------------- + +1.12.10 (2014-09-28) +-------------------- + +1.12.9 (2014-09-21) +------------------- + +1.12.8 (2014-08-19) +------------------- + +1.12.6 (2014-07-27) +------------------- + +1.12.4 (2014-04-28) +------------------- + +1.12.3 (2014-04-12) +------------------- + +1.12.2 (2014-04-08) +------------------- + +1.11.7 (2014-03-28) +------------------- diff --git a/future pkg/image_pipeline/image_pipeline/CMakeLists.txt b/future pkg/image_pipeline/image_pipeline/CMakeLists.txt new file mode 100644 index 0000000..69f7132 --- /dev/null +++ b/future pkg/image_pipeline/image_pipeline/CMakeLists.txt @@ -0,0 +1,4 @@ +cmake_minimum_required(VERSION 2.8.3) +project(image_pipeline) +find_package(catkin REQUIRED) +catkin_metapackage() diff --git a/future pkg/image_pipeline/image_pipeline/package.xml b/future pkg/image_pipeline/image_pipeline/package.xml new file mode 100644 index 0000000..6dacd33 --- /dev/null +++ b/future pkg/image_pipeline/image_pipeline/package.xml @@ -0,0 +1,29 @@ + + image_pipeline + 1.12.23 + image_pipeline fills the gap between getting raw images from a camera driver and higher-level vision processing. 
+ Patrick Mihelich + James Bowman + Vincent Rabaud + BSD + + http://www.ros.org/wiki/image_pipeline + https://github.com/ros-perception/image_pipeline/issues + https://github.com/ros-perception/image_pipeline + + catkin + + + camera_calibration + depth_image_proc + image_proc + image_publisher + image_rotate + image_view + stereo_image_proc + + + + + + diff --git a/future pkg/image_pipeline/image_proc/.tar b/future pkg/image_pipeline/image_proc/.tar new file mode 100644 index 0000000..eb6fe5b --- /dev/null +++ b/future pkg/image_pipeline/image_proc/.tar @@ -0,0 +1,2 @@ +{!!python/unicode 'url': 'https://github.com/ros-gbp/image_pipeline-release/archive/release/kinetic/image_proc/1.12.23-0.tar.gz', + !!python/unicode 'version': image_pipeline-release-release-kinetic-image_proc-1.12.23-0} diff --git a/future pkg/image_pipeline/image_proc/CHANGELOG.rst b/future pkg/image_pipeline/image_proc/CHANGELOG.rst new file mode 100644 index 0000000..6275315 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/CHANGELOG.rst @@ -0,0 +1,119 @@ +1.12.23 (2018-05-10) +-------------------- + +1.12.22 (2017-12-08) +-------------------- +* Merge pull request `#311 `_ from knorth55/revert-299 + Revert "Fix image_resize nodelet (`#299 `_)" + This reverts commit 32e19697ebce47101b063c6a02b95dfa2c5dbc52. +* Contributors: Shingo Kitagawa, Tully Foote + +1.12.21 (2017-11-05) +-------------------- +* Fix image_resize nodelet (`#299 `_) + Update interpolation types + Add arguments to enable disable each nodelet + Add default arguments for image_resize and image_rect + Use toCVShare instead of toCVCopy + Include image_resize in image_proc +* Updated fix for traits change. (`#303 `_) +* Fix C++11 compilation + This fixes `#292 `_ and `#291 `_ +* [image_proc][crop_decimate] support changing target image frame_id (`#276 `_) +* Contributors: Furushchev, Mike Purvis, Vincent Rabaud, bikramak + +1.12.20 (2017-04-30) +-------------------- +* Add nodelet to resize image and camera_info (`#273 `_) + * Add nodelet to resize image and camera_info + * Depends on nodelet_topic_tools + * Use recursive_mutex for mutex guard for dynamic reconfiguring +* Fix nodelet name: crop_nonZero -> crop_non_zero (`#269 `_) + Fix https://github.com/ros-perception/image_pipeline/issues/217 +* Fix permission of executable files unexpectedly (`#260 `_) +* address gcc6 build error + With gcc6, compiling fails with `stdlib.h: No such file or directory`, + as including '-isystem /usr/include' breaks with gcc6, cf., + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129. + This commit addresses this issue for this package in the same way + it was addressed in various other ROS packages. A list of related + commits and pull requests is at: + https://github.com/ros/rosdistro/issues/12783 + Signed-off-by: Lukas Bulwahn +* Contributors: Kentaro Wada, Lukas Bulwahn + +1.12.19 (2016-07-24) +-------------------- + +1.12.18 (2016-07-12) +-------------------- + +1.12.17 (2016-07-11) +-------------------- + +1.12.16 (2016-03-19) +-------------------- +* clean OpenCV dependency in package.xml +* issue `#180 `_ Check if all distortion coefficients are zero. + Test with: + rostest --reuse-master --text image_proc test_rectify.xml + Can also test interactively with vimjay image_rect.launch, which brings up an rqt gui and camera info distortion coefficients can be dynamically reconfigured. 
+* Add a feature to crop the largest valid (non zero) area + Remove unnecessary headers + change a filename to fit for the ROS convention +* Contributors: Kenta Yonekura, Lucas Walter, Vincent Rabaud + +1.12.15 (2016-01-17) +-------------------- +* simplify OpenCV3 conversion +* Contributors: Vincent Rabaud + +1.12.14 (2015-07-22) +-------------------- + +1.12.13 (2015-04-06) +-------------------- +* fix dependencies +* Contributors: Vincent Rabaud + +1.12.12 (2014-12-31) +-------------------- + +1.12.11 (2014-10-26) +-------------------- + +1.12.10 (2014-09-28) +-------------------- + +1.12.9 (2014-09-21) +------------------- +* get code to compile with OpenCV3 + fixes `#96 `_ +* Contributors: Vincent Rabaud + +1.12.8 (2014-08-19) +------------------- + +1.12.6 (2014-07-27) +------------------- + +1.12.4 (2014-04-28) +------------------- + +1.12.3 (2014-04-12) +------------------- + +1.12.2 (2014-04-08) +------------------- + +1.12.1 (2014-04-06) +------------------- +* get proper opencv dependency +* Contributors: Vincent Rabaud + +1.11.7 (2014-03-28) +------------------- + +1.11.6 (2014-01-29 00:38:55 +0100) +---------------------------------- +- fix bad OpenCV linkage (#53) diff --git a/future pkg/image_pipeline/image_proc/CMakeLists.txt b/future pkg/image_pipeline/image_proc/CMakeLists.txt new file mode 100644 index 0000000..be750ce --- /dev/null +++ b/future pkg/image_pipeline/image_proc/CMakeLists.txt @@ -0,0 +1,68 @@ +cmake_minimum_required(VERSION 2.8) +project(image_proc) + +find_package(catkin REQUIRED) + +find_package(catkin REQUIRED cv_bridge dynamic_reconfigure image_geometry image_transport nodelet nodelet_topic_tools roscpp sensor_msgs) +find_package(OpenCV REQUIRED) +find_package(Boost REQUIRED COMPONENTS thread) + +if(cv_bridge_VERSION VERSION_GREATER "1.12.0") + add_compile_options(-std=c++11) +endif() + +# Dynamic reconfigure support +generate_dynamic_reconfigure_options(cfg/CropDecimate.cfg cfg/Debayer.cfg cfg/Rectify.cfg cfg/Resize.cfg) + +catkin_package( + CATKIN_DEPENDS image_geometry roscpp sensor_msgs + DEPENDS OpenCV + INCLUDE_DIRS include + LIBRARIES ${PROJECT_NAME} +) + +include_directories(include ${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS}) + +# Temporary fix for DataType deprecation in OpenCV 3.3.1. We continue to use the deprecated form for now because +# the new one is not available in OpenCV 2.4 (on Trusty). 
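+# (Defining OPENCV_TRAITS_ENABLE_DEPRECATED keeps the pre-3.3.1 cv::DataType traits
+# available, so the same sources build against both older and newer OpenCV releases.)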
+add_definitions(-DOPENCV_TRAITS_ENABLE_DEPRECATED) + +# Nodelet library +add_library(${PROJECT_NAME} src/libimage_proc/processor.cpp + src/nodelets/debayer.cpp + src/nodelets/rectify.cpp + src/nodelets/resize.cpp + src/nodelets/crop_decimate.cpp + src/libimage_proc/advertisement_checker.cpp + src/nodelets/edge_aware.cpp + src/nodelets/crop_non_zero.cpp +) +add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg) +target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${Boost_LIBRARIES} ${OpenCV_LIBRARIES}) + +install(TARGETS ${PROJECT_NAME} + DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +) +install(DIRECTORY include/${PROJECT_NAME}/ + DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} +) +install(FILES nodelet_plugins.xml + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +) + +# Standalone node +add_executable(image_proc_exe src/nodes/image_proc.cpp) +target_link_libraries(image_proc_exe ${PROJECT_NAME} ${Boost_LIBRARIES} ${OpenCV_LIBRARIES}) +SET_TARGET_PROPERTIES(image_proc_exe PROPERTIES OUTPUT_NAME image_proc) +install(TARGETS image_proc_exe + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) + +# install the launch file +install(DIRECTORY launch + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/ +) + +if(CATKIN_ENABLE_TESTING) + add_subdirectory(test) +endif() diff --git a/future pkg/image_pipeline/image_proc/cfg/CropDecimate.cfg b/future pkg/image_pipeline/image_proc/cfg/CropDecimate.cfg new file mode 100755 index 0000000..e00ff75 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/cfg/CropDecimate.cfg @@ -0,0 +1,35 @@ +#! /usr/bin/env python + +PACKAGE='image_proc' + +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() + +# Decimation parameters +gen.add("decimation_x", int_t, 0, "Number of pixels to decimate to one horizontally", 1, 1, 16) +gen.add("decimation_y", int_t, 0, "Number of pixels to decimate to one vertically", 1, 1, 16) + +# ROI parameters +# Maximums are arbitrary set to the resolution of a 5Mp Prosilica, since we can't set +# the dynamically. +gen.add("x_offset", int_t, 0, "X offset of the region of interest", 0, 0, 2447) +gen.add("y_offset", int_t, 0, "Y offset of the region of interest", 0, 0, 2049) +gen.add("width", int_t, 0, "Width of the region of interest", 0, 0, 2448) +gen.add("height", int_t, 0, "Height of the region of interest", 0, 0, 2050) + +interpolate_enum = gen.enum([ gen.const("NN", int_t, 0, "Nearest-neighbor sampling"), + gen.const("Linear", int_t, 1, "Bilinear interpolation"), + gen.const("Cubic", int_t, 2, "Bicubic interpolation over 4x4 neighborhood"), + gen.const("Area", int_t, 3, "Resampling using pixel area relation"), + gen.const("Lanczos4", int_t, 4, "Lanczos interpolation over 8x8 neighborhood")], + "interpolation type") + +gen.add("interpolation", int_t, 0, + "Sampling algorithm", + 0, 0, 4, edit_method = interpolate_enum) + +# First string value is node name, used only for generating documentation +# Second string value ("CropDecimate") is name of class and generated +# .h file, with "Config" added, so class CropDecimateConfig +exit(gen.generate(PACKAGE, "image_proc", "CropDecimate")) diff --git a/future pkg/image_pipeline/image_proc/cfg/Debayer.cfg b/future pkg/image_pipeline/image_proc/cfg/Debayer.cfg new file mode 100755 index 0000000..b5bc6d9 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/cfg/Debayer.cfg @@ -0,0 +1,26 @@ +#! 
/usr/bin/env python + +PACKAGE='image_proc' + +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() + +debayer_enum = gen.enum([ gen.const("Bilinear", int_t, 0, + "Fast algorithm using bilinear interpolation"), + gen.const("EdgeAware", int_t, 1, + "Edge-aware algorithm"), + gen.const("EdgeAwareWeighted", int_t, 2, + "Weighted edge-aware algorithm"), + gen.const("VNG", int_t, 3, + "Slow but high quality Variable Number of Gradients algorithm")], + "Debayering algorithm") + +gen.add("debayer", int_t, 0, + "Debayering algorithm", + 0, 0, 3, edit_method = debayer_enum) + +# First string value is node name, used only for generating documentation +# Second string value ("Debayer") is name of class and generated +# .h file, with "Config" added, so class DebayerConfig +exit(gen.generate(PACKAGE, "image_proc", "Debayer")) diff --git a/future pkg/image_pipeline/image_proc/cfg/Rectify.cfg b/future pkg/image_pipeline/image_proc/cfg/Rectify.cfg new file mode 100755 index 0000000..c264cd2 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/cfg/Rectify.cfg @@ -0,0 +1,22 @@ +#! /usr/bin/env python + +PACKAGE='image_proc' + +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() + +interpolate_enum = gen.enum([ gen.const("NN", int_t, 0, "Nearest neighbor"), + gen.const("Linear", int_t, 1, "Linear"), + gen.const("Cubic", int_t, 2, "Cubic"), + gen.const("Lanczos4", int_t, 4, "Lanczos4")], + "interpolation type") + +gen.add("interpolation", int_t, 0, + "Interpolation algorithm between source image pixels", + 1, 0, 4, edit_method = interpolate_enum) + +# First string value is node name, used only for generating documentation +# Second string value ("Rectify") is name of class and generated +# .h file, with "Config" added, so class RectifyConfig +exit(gen.generate(PACKAGE, "image_proc", "Rectify")) diff --git a/future pkg/image_pipeline/image_proc/cfg/Resize.cfg b/future pkg/image_pipeline/image_proc/cfg/Resize.cfg new file mode 100755 index 0000000..a299654 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/cfg/Resize.cfg @@ -0,0 +1,37 @@ +#! /usr/bin/env python + +from dynamic_reconfigure.parameter_generator_catkin import * + + +PACKAGE = 'image_proc' +ID = 'Resize' + +gen = ParameterGenerator() + +interpolate_enum = gen.enum([gen.const('NN', int_t, 0, 'Nearest neighbor'), + gen.const('Linear', int_t, 1, 'Linear'), + gen.const('Cubic', int_t, 2, 'Cubic'), + gen.const('Lanczos4', int_t, 4, 'Lanczos4')], + 'interpolation type') +gen.add('interpolation', int_t, level=0, + description='Interpolation algorithm between source image pixels', + default=1, min=0, max=4, edit_method=interpolate_enum) + +gen.add('use_scale', bool_t, level=0, + description='Flag to use scale instead of static size.', + default=True) +gen.add('scale_height', double_t, level=0, + description='Scale of height.', + default=1, min=0, max=10) +gen.add('scale_width', double_t, level=0, + description='Scale of width', + default=1, min=0, max=10) + +gen.add('height', int_t, level=0, + description='Destination height. Ignored if negative.', + default=-1, min=-1) +gen.add('width', int_t, level=0, + description='Destination width. 
Ignored if negative.', + default=-1, min=-1) + +exit(gen.generate(PACKAGE, PACKAGE, ID)) diff --git a/future pkg/image_pipeline/image_proc/include/image_proc/advertisement_checker.h b/future pkg/image_pipeline/image_proc/include/image_proc/advertisement_checker.h new file mode 100644 index 0000000..cb7e64b --- /dev/null +++ b/future pkg/image_pipeline/image_proc/include/image_proc/advertisement_checker.h @@ -0,0 +1,61 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#ifndef IMAGE_PROC_ADVERTISEMENT_CHECKER_H +#define IMAGE_PROC_ADVERTISEMENT_CHECKER_H + +#include + +namespace image_proc { + +class AdvertisementChecker +{ + ros::NodeHandle nh_; + std::string name_; + ros::WallTimer timer_; + ros::V_string topics_; + + void timerCb(); + +public: + AdvertisementChecker(const ros::NodeHandle& nh = ros::NodeHandle(), + const std::string& name = std::string()); + + void start(const ros::V_string& topics, double duration); + + void stop(); +}; + +} // namespace image_proc + +#endif diff --git a/future pkg/image_pipeline/image_proc/include/image_proc/processor.h b/future pkg/image_pipeline/image_proc/include/image_proc/processor.h new file mode 100644 index 0000000..df2d3e4 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/include/image_proc/processor.h @@ -0,0 +1,77 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#ifndef IMAGE_PROC_PROCESSOR_H +#define IMAGE_PROC_PROCESSOR_H + +#include +#include +#include + +namespace image_proc { + +struct ImageSet +{ + std::string color_encoding; + cv::Mat mono; + cv::Mat rect; + cv::Mat color; + cv::Mat rect_color; +}; + +class Processor +{ +public: + Processor() + : interpolation_(cv::INTER_LINEAR) + { + } + + int interpolation_; + + enum { + MONO = 1 << 0, + RECT = 1 << 1, + COLOR = 1 << 2, + RECT_COLOR = 1 << 3, + ALL = MONO | RECT | COLOR | RECT_COLOR + }; + + bool process(const sensor_msgs::ImageConstPtr& raw_image, + const image_geometry::PinholeCameraModel& model, + ImageSet& output, int flags = ALL) const; +}; + +} //namespace image_proc + +#endif diff --git a/future pkg/image_pipeline/image_proc/launch/image_proc.launch b/future pkg/image_pipeline/image_proc/launch/image_proc.launch new file mode 100644 index 0000000..bc321f0 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/launch/image_proc.launch @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/future pkg/image_pipeline/image_proc/mainpage.dox b/future pkg/image_pipeline/image_proc/mainpage.dox new file mode 100644 index 0000000..ee611a5 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/mainpage.dox @@ -0,0 +1,12 @@ +/** +@mainpage +@htmlinclude manifest.html + +@b image_proc provides a node for performing single image rectification and +color processing on the raw images produced by a camera. The outputs of +image_proc are suitable for visual processing by other nodes. See +http://www.ros.org/wiki/image_proc for documentation. + +Currently this package has no public code API. + +*/ diff --git a/future pkg/image_pipeline/image_proc/nodelet_plugins.xml b/future pkg/image_pipeline/image_proc/nodelet_plugins.xml new file mode 100644 index 0000000..56835b2 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/nodelet_plugins.xml @@ -0,0 +1,48 @@ + + + + + Nodelet to debayer (if needed) a raw camera image stream. + + + + + + Nodelet to rectify an unrectified camera image stream. + + + + + + Nodelet to resize image and camera_info. + + + + + + Nodelet to apply decimation (software binning) and ROI to a raw camera + image post-capture. 
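+  <!-- Usage note (illustrative, not part of the upstream manifest): each class declared
+       here is loaded through the standard nodelet machinery, e.g.
+       "rosrun nodelet nodelet load image_proc/crop_decimate <manager>", or via the
+       nodelet <node> entries in launch/image_proc.launch earlier in this patch. -->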
+ + + + + + + + Nodelet to crop the largest valid (Non Zero) area of the image. + + + + diff --git a/future pkg/image_pipeline/image_proc/package.xml b/future pkg/image_pipeline/image_proc/package.xml new file mode 100644 index 0000000..315d31e --- /dev/null +++ b/future pkg/image_pipeline/image_proc/package.xml @@ -0,0 +1,39 @@ + + image_proc + 1.12.23 + Single image rectification and color processing. + Patrick Mihelich + Kurt Konolige + Jeremy Leibs + Vincent Rabaud + BSD + http://www.ros.org/wiki/image_proc + + + + + + catkin + + rostest + camera_calibration_parsers + + boost + cv_bridge + dynamic_reconfigure + image_geometry + image_transport + nodelet + nodelet_topic_tools + roscpp + sensor_msgs + + cv_bridge + dynamic_reconfigure + image_geometry + image_transport + nodelet + nodelet_topic_tools + roscpp + sensor_msgs + diff --git a/future pkg/image_pipeline/image_proc/src/libimage_proc/advertisement_checker.cpp b/future pkg/image_pipeline/image_proc/src/libimage_proc/advertisement_checker.cpp new file mode 100644 index 0000000..ee1b17a --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/libimage_proc/advertisement_checker.cpp @@ -0,0 +1,92 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include "image_proc/advertisement_checker.h" +#include + +namespace image_proc { + +AdvertisementChecker::AdvertisementChecker(const ros::NodeHandle& nh, + const std::string& name) + : nh_(nh), + name_(name) +{ +} + +void AdvertisementChecker::timerCb() +{ + ros::master::V_TopicInfo topic_info; + if (!ros::master::getTopics(topic_info)) return; + + ros::V_string::iterator topic_it = topics_.begin(); + while (topic_it != topics_.end()) + { + // Should use std::find_if + bool found = false; + ros::master::V_TopicInfo::iterator info_it = topic_info.begin(); + while (!found && info_it != topic_info.end()) + { + found = (*topic_it == info_it->name); + ++info_it; + } + if (found) + topic_it = topics_.erase(topic_it); + else + { + ROS_WARN_NAMED(name_, "The input topic '%s' is not yet advertised", topic_it->c_str()); + ++topic_it; + } + } + + if (topics_.empty()) + stop(); +} + +void AdvertisementChecker::start(const ros::V_string& topics, double duration) +{ + topics_.clear(); + BOOST_FOREACH(const std::string& topic, topics) + topics_.push_back(nh_.resolveName(topic)); + + ros::NodeHandle nh; + timer_ = nh.createWallTimer(ros::WallDuration(duration), + boost::bind(&AdvertisementChecker::timerCb, this)); + timerCb(); +} + +void AdvertisementChecker::stop() +{ + timer_.stop(); +} + +} // namespace image_proc diff --git a/future pkg/image_pipeline/image_proc/src/libimage_proc/processor.cpp b/future pkg/image_pipeline/image_proc/src/libimage_proc/processor.cpp new file mode 100644 index 0000000..254b90a --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/libimage_proc/processor.cpp @@ -0,0 +1,130 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include "image_proc/processor.h" +#include +#include + +namespace image_proc { + +namespace enc = sensor_msgs::image_encodings; + +bool Processor::process(const sensor_msgs::ImageConstPtr& raw_image, + const image_geometry::PinholeCameraModel& model, + ImageSet& output, int flags) const +{ + static const int MONO_EITHER = MONO | RECT; + static const int COLOR_EITHER = COLOR | RECT_COLOR; + if (!(flags & ALL)) return true; + + // Check if raw_image is color + const std::string& raw_encoding = raw_image->encoding; + int raw_type = CV_8UC1; + if (raw_encoding == enc::BGR8 || raw_encoding == enc::RGB8) { + raw_type = CV_8UC3; + output.color_encoding = raw_encoding; + } + // Construct cv::Mat pointing to raw_image data + const cv::Mat raw(raw_image->height, raw_image->width, raw_type, + const_cast(&raw_image->data[0]), raw_image->step); + + /////////////////////////////////////////////////////// + // Construct colorized (unrectified) images from raw // + /////////////////////////////////////////////////////// + + // Bayer case + if (raw_encoding.find("bayer") != std::string::npos) { + // Convert to color BGR + /// @todo Faster to convert directly to mono when color is not requested, but OpenCV doesn't support + int code = 0; + if (raw_encoding == enc::BAYER_RGGB8) + code = cv::COLOR_BayerBG2BGR; + else if (raw_encoding == enc::BAYER_BGGR8) + code = cv::COLOR_BayerRG2BGR; + else if (raw_encoding == enc::BAYER_GBRG8) + code = cv::COLOR_BayerGR2BGR; + else if (raw_encoding == enc::BAYER_GRBG8) + code = cv::COLOR_BayerGB2BGR; + else { + ROS_ERROR("[image_proc] Unsupported encoding '%s'", raw_encoding.c_str()); + return false; + } + cv::cvtColor(raw, output.color, code); + output.color_encoding = enc::BGR8; + + if (flags & MONO_EITHER) + cv::cvtColor(output.color, output.mono, cv::COLOR_BGR2GRAY); + } + // Color case + else if (raw_type == CV_8UC3) { + output.color = raw; + if (flags & MONO_EITHER) { + int code = (raw_encoding == enc::BGR8) ? cv::COLOR_BGR2GRAY : cv::COLOR_RGB2GRAY; + cv::cvtColor(output.color, output.mono, code); + } + } + // Mono case + else if (raw_encoding == enc::MONO8) { + output.mono = raw; + if (flags & COLOR_EITHER) { + output.color_encoding = enc::MONO8; + output.color = raw; + } + } + // 8UC3 does not specify a color encoding. Is it BGR, RGB, HSV, XYZ, LUV...? + else if (raw_encoding == enc::TYPE_8UC3) { + ROS_ERROR("[image_proc] Ambiguous encoding '8UC3'. The camera driver " + "should set the encoding to 'bgr8' or 'rgb8'."); + return false; + } + // Something else we can't handle + else { + ROS_ERROR("[image_proc] Unsupported encoding '%s'", raw_encoding.c_str()); + return false; + } + + ////////////////////////////////////////////////////// + // Construct rectified images from colorized images // + ////////////////////////////////////////////////////// + + /// @todo If no distortion, could just point to the colorized data. But copy is + /// already way faster than remap. 
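+  // Illustrative caller (not part of this file), assuming a CameraInfo message
+  // 'info_msg' and a raw image message 'raw_msg' are available:
+  //
+  //   image_geometry::PinholeCameraModel model;
+  //   model.fromCameraInfo(info_msg);
+  //   image_proc::Processor processor;   // interpolation_ defaults to cv::INTER_LINEAR
+  //   image_proc::ImageSet out;
+  //   if (processor.process(raw_msg, model, out, image_proc::Processor::RECT_COLOR))
+  //     /* use out.rect_color */;
+  //
+  // Only the members of 'output' selected by 'flags' are filled in below.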
+ if (flags & RECT) + model.rectifyImage(output.mono, output.rect, interpolation_); + if (flags & RECT_COLOR) + model.rectifyImage(output.color, output.rect_color, interpolation_); + + return true; +} + +} //namespace image_proc diff --git a/future pkg/image_pipeline/image_proc/src/nodelets/crop_decimate.cpp b/future pkg/image_pipeline/image_proc/src/nodelets/crop_decimate.cpp new file mode 100644 index 0000000..46a3291 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/nodelets/crop_decimate.cpp @@ -0,0 +1,353 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include +#if ((BOOST_VERSION / 100) % 1000) >= 53 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace image_proc { + +using namespace cv_bridge; // CvImage, toCvShare + +class CropDecimateNodelet : public nodelet::Nodelet +{ + // ROS communication + boost::shared_ptr it_in_, it_out_; + image_transport::CameraSubscriber sub_; + int queue_size_; + std::string target_frame_id_; + + boost::mutex connect_mutex_; + image_transport::CameraPublisher pub_; + + // Dynamic reconfigure + boost::recursive_mutex config_mutex_; + typedef image_proc::CropDecimateConfig Config; + typedef dynamic_reconfigure::Server ReconfigureServer; + boost::shared_ptr reconfigure_server_; + Config config_; + + virtual void onInit(); + + void connectCb(); + + void imageCb(const sensor_msgs::ImageConstPtr& image_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg); + + void configCb(Config &config, uint32_t level); +}; + +void CropDecimateNodelet::onInit() +{ + ros::NodeHandle& nh = getNodeHandle(); + ros::NodeHandle& private_nh = getPrivateNodeHandle(); + ros::NodeHandle nh_in (nh, "camera"); + ros::NodeHandle nh_out(nh, "camera_out"); + it_in_ .reset(new image_transport::ImageTransport(nh_in)); + it_out_.reset(new image_transport::ImageTransport(nh_out)); + + // Read parameters + private_nh.param("queue_size", queue_size_, 5); + private_nh.param("target_frame_id", target_frame_id_, std::string()); + + // Set up dynamic reconfigure + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, private_nh)); + ReconfigureServer::CallbackType f = boost::bind(&CropDecimateNodelet::configCb, this, _1, _2); + reconfigure_server_->setCallback(f); + + // Monitor whether anyone is subscribed to the output + image_transport::SubscriberStatusCallback connect_cb = boost::bind(&CropDecimateNodelet::connectCb, this); + ros::SubscriberStatusCallback connect_cb_info = boost::bind(&CropDecimateNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_ + boost::lock_guard lock(connect_mutex_); + pub_ = it_out_->advertiseCamera("image_raw", 1, connect_cb, connect_cb, connect_cb_info, connect_cb_info); +} + +// Handles (un)subscribing when clients (un)subscribe +void CropDecimateNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_.getNumSubscribers() == 0) + sub_.shutdown(); + else if (!sub_) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_ = it_in_->subscribeCamera("image_raw", queue_size_, &CropDecimateNodelet::imageCb, this, hints); + } +} + +template +void debayer2x2toBGR(const cv::Mat& src, cv::Mat& dst, int R, int G1, int G2, int B) +{ + typedef cv::Vec DstPixel; // 8- or 16-bit BGR + dst.create(src.rows / 2, src.cols / 2, cv::DataType::type); + + int src_row_step = src.step1(); + int dst_row_step = dst.step1(); + const T* src_row = src.ptr(); + T* dst_row = dst.ptr(); + + // 2x2 downsample and debayer at once + for (int y = 0; y < dst.rows; ++y) + { + for (int x = 0; x < dst.cols; ++x) + { + dst_row[x*3 + 0] = src_row[x*2 + B]; + dst_row[x*3 + 1] = (src_row[x*2 + G1] + src_row[x*2 + G2]) / 2; + dst_row[x*3 + 2] = src_row[x*2 + R]; + } + src_row += src_row_step * 2; + dst_row += dst_row_step; + } +} + +// Templated on pixel size, in bytes (MONO8 = 1, BGR8 = 3, RGBA16 = 8, ...) 
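+// For reference, pixel size here is cv::Mat::elemSize() = channels * bytes per channel,
+// e.g. MONO8 -> 1, MONO16 -> 2, BGR8/RGB8 -> 3, BGRA8/RGBA8 -> 4,
+// BGR16/RGB16 -> 6, BGRA16/RGBA16 -> 8, 32FC3 -> 12, 32FC4 -> 16;
+// these are exactly the sizes handled by the switch in imageCb() below.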
+template +void decimate(const cv::Mat& src, cv::Mat& dst, int decimation_x, int decimation_y) +{ + dst.create(src.rows / decimation_y, src.cols / decimation_x, src.type()); + + int src_row_step = src.step[0] * decimation_y; + int src_pixel_step = N * decimation_x; + int dst_row_step = dst.step[0]; + + const uint8_t* src_row = src.ptr(); + uint8_t* dst_row = dst.ptr(); + + for (int y = 0; y < dst.rows; ++y) + { + const uint8_t* src_pixel = src_row; + uint8_t* dst_pixel = dst_row; + for (int x = 0; x < dst.cols; ++x) + { + memcpy(dst_pixel, src_pixel, N); // Should inline with small, fixed N + src_pixel += src_pixel_step; + dst_pixel += N; + } + src_row += src_row_step; + dst_row += dst_row_step; + } +} + +void CropDecimateNodelet::imageCb(const sensor_msgs::ImageConstPtr& image_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg) +{ + /// @todo Check image dimensions match info_msg + /// @todo Publish tweaks to config_ so they appear in reconfigure_gui + + Config config; + { + boost::lock_guard lock(config_mutex_); + config = config_; + } + int decimation_x = config.decimation_x; + int decimation_y = config.decimation_y; + + // Compute the ROI we'll actually use + bool is_bayer = sensor_msgs::image_encodings::isBayer(image_msg->encoding); + if (is_bayer) + { + // Odd offsets for Bayer images basically change the Bayer pattern, but that's + // unnecessarily complicated to support + config.x_offset &= ~0x1; + config.y_offset &= ~0x1; + config.width &= ~0x1; + config.height &= ~0x1; + } + + int max_width = image_msg->width - config.x_offset; + int max_height = image_msg->height - config.y_offset; + int width = config.width; + int height = config.height; + if (width == 0 || width > max_width) + width = max_width; + if (height == 0 || height > max_height) + height = max_height; + + // On no-op, just pass the messages along + if (decimation_x == 1 && + decimation_y == 1 && + config.x_offset == 0 && + config.y_offset == 0 && + width == (int)image_msg->width && + height == (int)image_msg->height) + { + pub_.publish(image_msg, info_msg); + return; + } + + // Get a cv::Mat view of the source data + CvImageConstPtr source = toCvShare(image_msg); + + // Except in Bayer downsampling case, output has same encoding as the input + CvImage output(source->header, source->encoding); + // Apply ROI (no copy, still a view of the image_msg data) + output.image = source->image(cv::Rect(config.x_offset, config.y_offset, width, height)); + + // Special case: when decimating Bayer images, we first do a 2x2 decimation to BGR + if (is_bayer && (decimation_x > 1 || decimation_y > 1)) + { + if (decimation_x % 2 != 0 || decimation_y % 2 != 0) + { + NODELET_ERROR_THROTTLE(2, "Odd decimation not supported for Bayer images"); + return; + } + + cv::Mat bgr; + int step = output.image.step1(); + if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_RGGB8) + debayer2x2toBGR(output.image, bgr, 0, 1, step, step + 1); + else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_BGGR8) + debayer2x2toBGR(output.image, bgr, step + 1, 1, step, 0); + else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GBRG8) + debayer2x2toBGR(output.image, bgr, step, 0, step + 1, 1); + else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GRBG8) + debayer2x2toBGR(output.image, bgr, 1, 0, step + 1, step); + else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_RGGB16) + debayer2x2toBGR(output.image, bgr, 0, 1, step, step + 1); + else if (image_msg->encoding == 
sensor_msgs::image_encodings::BAYER_BGGR16) + debayer2x2toBGR(output.image, bgr, step + 1, 1, step, 0); + else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GBRG16) + debayer2x2toBGR(output.image, bgr, step, 0, step + 1, 1); + else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GRBG16) + debayer2x2toBGR(output.image, bgr, 1, 0, step + 1, step); + else + { + NODELET_ERROR_THROTTLE(2, "Unrecognized Bayer encoding '%s'", image_msg->encoding.c_str()); + return; + } + + output.image = bgr; + output.encoding = (bgr.depth() == CV_8U) ? sensor_msgs::image_encodings::BGR8 + : sensor_msgs::image_encodings::BGR16; + decimation_x /= 2; + decimation_y /= 2; + } + + // Apply further downsampling, if necessary + if (decimation_x > 1 || decimation_y > 1) + { + cv::Mat decimated; + + if (config.interpolation == image_proc::CropDecimate_NN) + { + // Use optimized method instead of OpenCV's more general NN resize + int pixel_size = output.image.elemSize(); + switch (pixel_size) + { + // Currently support up through 4-channel float + case 1: + decimate<1>(output.image, decimated, decimation_x, decimation_y); + break; + case 2: + decimate<2>(output.image, decimated, decimation_x, decimation_y); + break; + case 3: + decimate<3>(output.image, decimated, decimation_x, decimation_y); + break; + case 4: + decimate<4>(output.image, decimated, decimation_x, decimation_y); + break; + case 6: + decimate<6>(output.image, decimated, decimation_x, decimation_y); + break; + case 8: + decimate<8>(output.image, decimated, decimation_x, decimation_y); + break; + case 12: + decimate<12>(output.image, decimated, decimation_x, decimation_y); + break; + case 16: + decimate<16>(output.image, decimated, decimation_x, decimation_y); + break; + default: + NODELET_ERROR_THROTTLE(2, "Unsupported pixel size, %d bytes", pixel_size); + return; + } + } + else + { + // Linear, cubic, area, ... + cv::Size size(output.image.cols / decimation_x, output.image.rows / decimation_y); + cv::resize(output.image, decimated, size, 0.0, 0.0, config.interpolation); + } + + output.image = decimated; + } + + // Create output Image message + /// @todo Could save copies by allocating this above and having output.image alias it + sensor_msgs::ImagePtr out_image = output.toImageMsg(); + + // Create updated CameraInfo message + sensor_msgs::CameraInfoPtr out_info = boost::make_shared(*info_msg); + int binning_x = std::max((int)info_msg->binning_x, 1); + int binning_y = std::max((int)info_msg->binning_y, 1); + out_info->binning_x = binning_x * config.decimation_x; + out_info->binning_y = binning_y * config.decimation_y; + out_info->roi.x_offset += config.x_offset * binning_x; + out_info->roi.y_offset += config.y_offset * binning_y; + out_info->roi.height = height * binning_y; + out_info->roi.width = width * binning_x; + // If no ROI specified, leave do_rectify as-is. If ROI specified, set do_rectify = true. 
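+  // Worked example (illustrative): an unbinned 640x480 input with
+  // decimation_x = decimation_y = 2 and no crop yields binning_x = binning_y = 2,
+  // roi.width/height of 640/480 (raw sensor coordinates), and do_rectify is left
+  // unchanged below because the ROI still covers the full image.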
+ if (width != (int)image_msg->width || height != (int)image_msg->height) + out_info->roi.do_rectify = true; + + if (!target_frame_id_.empty()) { + out_image->header.frame_id = target_frame_id_; + out_info->header.frame_id = target_frame_id_; + } + + pub_.publish(out_image, out_info); +} + +void CropDecimateNodelet::configCb(Config &config, uint32_t level) +{ + config_ = config; +} + +} // namespace image_proc + +// Register nodelet +#include +PLUGINLIB_EXPORT_CLASS( image_proc::CropDecimateNodelet, nodelet::Nodelet) diff --git a/future pkg/image_pipeline/image_proc/src/nodelets/crop_non_zero.cpp b/future pkg/image_pipeline/image_proc/src/nodelets/crop_non_zero.cpp new file mode 100644 index 0000000..4c8cd05 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/nodelets/crop_non_zero.cpp @@ -0,0 +1,146 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include +#include +#include +#include +//#include // for std::max_element + +namespace image_proc { + +namespace enc = sensor_msgs::image_encodings; + +class CropNonZeroNodelet : public nodelet::Nodelet +{ + // Subscriptions + boost::shared_ptr it_; + image_transport::Subscriber sub_raw_; + + // Publications + boost::mutex connect_mutex_; + image_transport::Publisher pub_; + + virtual void onInit(); + + void connectCb(); + + void imageCb(const sensor_msgs::ImageConstPtr& raw_msg); +}; + +void CropNonZeroNodelet::onInit() +{ + ros::NodeHandle& nh = getNodeHandle(); + it_.reset(new image_transport::ImageTransport(nh)); + + // Monitor whether anyone is subscribed to the output + image_transport::SubscriberStatusCallback connect_cb = boost::bind(&CropNonZeroNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_depth_ + boost::lock_guard lock(connect_mutex_); + pub_ = it_->advertise("image", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void CropNonZeroNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_.getNumSubscribers() == 0) + { + sub_raw_.shutdown(); + } + else if (!sub_raw_) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_raw_ = it_->subscribe("image_raw", 1, &CropNonZeroNodelet::imageCb, this, hints); + } +} + +void CropNonZeroNodelet::imageCb(const sensor_msgs::ImageConstPtr& raw_msg) +{ + cv_bridge::CvImagePtr cv_ptr; + try + { + cv_ptr = cv_bridge::toCvCopy(raw_msg); + } + catch (cv_bridge::Exception& e) + { + ROS_ERROR("cv_bridge exception: %s", e.what()); + return; + } + + // Check the number of channels + if(sensor_msgs::image_encodings::numChannels(raw_msg->encoding) != 1){ + NODELET_ERROR_THROTTLE(2, "Only grayscale image is acceptable, got [%s]", raw_msg->encoding.c_str()); + return; + } + + std::vector >cnt; + cv::Mat1b m(raw_msg->width, raw_msg->height); + + if (raw_msg->encoding == enc::TYPE_8UC1){ + m = cv_ptr->image; + }else{ + double minVal, maxVal; + cv::minMaxIdx(cv_ptr->image, &minVal, &maxVal, 0, 0, cv_ptr->image != 0.); + double ra = maxVal - minVal; + + cv_ptr->image.convertTo(m, CV_8U, 255./ra, -minVal*255./ra); + } + + cv::findContours(m, cnt, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE); + + // search the largest area + /* // -std=c++11 + std::vector >::iterator it = std::max_element(cnt.begin(), cnt.end(), [](std::vector a, std::vector b) { + return a.size() < b.size(); + }); + */ + std::vector >::iterator it = cnt.begin(); + for(std::vector >::iterator i=cnt.begin();i!=cnt.end();++i){ + it = (*it).size() < (*i).size() ? 
i : it; + } + cv::Rect r = cv::boundingRect(cnt[std::distance(cnt.begin(), it)]); + + cv_bridge::CvImage out_msg; + out_msg.header = raw_msg->header; + out_msg.encoding = raw_msg->encoding; + out_msg.image = cv_ptr->image(r); + + pub_.publish(out_msg.toImageMsg()); +} + +} // namespace image_proc + +// Register as nodelet +#include +PLUGINLIB_EXPORT_CLASS(image_proc::CropNonZeroNodelet,nodelet::Nodelet); diff --git a/future pkg/image_pipeline/image_proc/src/nodelets/debayer.cpp b/future pkg/image_pipeline/image_proc/src/nodelets/debayer.cpp new file mode 100644 index 0000000..608cfe4 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/nodelets/debayer.cpp @@ -0,0 +1,276 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include +#include +#if ((BOOST_VERSION / 100) % 1000) >= 53 +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include +// Until merged into OpenCV +#include "edge_aware.h" + +#include + +namespace image_proc { + +namespace enc = sensor_msgs::image_encodings; + +class DebayerNodelet : public nodelet::Nodelet +{ + // ROS communication + boost::shared_ptr it_; + image_transport::Subscriber sub_raw_; + + boost::mutex connect_mutex_; + image_transport::Publisher pub_mono_; + image_transport::Publisher pub_color_; + + // Dynamic reconfigure + boost::recursive_mutex config_mutex_; + typedef image_proc::DebayerConfig Config; + typedef dynamic_reconfigure::Server ReconfigureServer; + boost::shared_ptr reconfigure_server_; + Config config_; + + virtual void onInit(); + + void connectCb(); + + void imageCb(const sensor_msgs::ImageConstPtr& raw_msg); + + void configCb(Config &config, uint32_t level); +}; + +void DebayerNodelet::onInit() +{ + ros::NodeHandle &nh = getNodeHandle(); + ros::NodeHandle &private_nh = getPrivateNodeHandle(); + it_.reset(new image_transport::ImageTransport(nh)); + + // Set up dynamic reconfigure + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, private_nh)); + ReconfigureServer::CallbackType f = boost::bind(&DebayerNodelet::configCb, this, _1, _2); + reconfigure_server_->setCallback(f); + + // Monitor whether anyone is subscribed to the output + typedef image_transport::SubscriberStatusCallback ConnectCB; + ConnectCB connect_cb = boost::bind(&DebayerNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_XXX + boost::lock_guard lock(connect_mutex_); + pub_mono_ = it_->advertise("image_mono", 1, connect_cb, connect_cb); + pub_color_ = it_->advertise("image_color", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void DebayerNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_mono_.getNumSubscribers() == 0 && pub_color_.getNumSubscribers() == 0) + sub_raw_.shutdown(); + else if (!sub_raw_) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_raw_ = it_->subscribe("image_raw", 1, &DebayerNodelet::imageCb, this, hints); + } +} + +void DebayerNodelet::imageCb(const sensor_msgs::ImageConstPtr& raw_msg) +{ + int bit_depth = enc::bitDepth(raw_msg->encoding); + //@todo Fix as soon as bitDepth fixes it + if (raw_msg->encoding == enc::YUV422) + bit_depth = 8; + + // First publish to mono if needed + if (pub_mono_.getNumSubscribers()) + { + if (enc::isMono(raw_msg->encoding)) + pub_mono_.publish(raw_msg); + else + { + if ((bit_depth != 8) && (bit_depth != 16)) + { + NODELET_WARN_THROTTLE(30, + "Raw image data from topic '%s' has unsupported depth: %d", + sub_raw_.getTopic().c_str(), bit_depth); + } else { + // Use cv_bridge to convert to Mono. 
If a type is not supported, + // it will error out there + sensor_msgs::ImagePtr gray_msg; + try + { + if (bit_depth == 8) + gray_msg = cv_bridge::toCvCopy(raw_msg, enc::MONO8)->toImageMsg(); + else + gray_msg = cv_bridge::toCvCopy(raw_msg, enc::MONO16)->toImageMsg(); + pub_mono_.publish(gray_msg); + } + catch (cv_bridge::Exception &e) + { + NODELET_WARN_THROTTLE(30, "cv_bridge conversion error: '%s'", e.what()); + } + } + } + } + + // Next, publish to color + if (!pub_color_.getNumSubscribers()) + return; + + if (enc::isMono(raw_msg->encoding)) + { + // For monochrome, no processing needed! + pub_color_.publish(raw_msg); + + // Warn if the user asked for color + NODELET_WARN_THROTTLE(30, + "Color topic '%s' requested, but raw image data from topic '%s' is grayscale", + pub_color_.getTopic().c_str(), sub_raw_.getTopic().c_str()); + } + else if (enc::isColor(raw_msg->encoding)) + { + pub_color_.publish(raw_msg); + } + else if (enc::isBayer(raw_msg->encoding)) { + int type = bit_depth == 8 ? CV_8U : CV_16U; + const cv::Mat bayer(raw_msg->height, raw_msg->width, CV_MAKETYPE(type, 1), + const_cast(&raw_msg->data[0]), raw_msg->step); + + sensor_msgs::ImagePtr color_msg = boost::make_shared(); + color_msg->header = raw_msg->header; + color_msg->height = raw_msg->height; + color_msg->width = raw_msg->width; + color_msg->encoding = bit_depth == 8? enc::BGR8 : enc::BGR16; + color_msg->step = color_msg->width * 3 * (bit_depth / 8); + color_msg->data.resize(color_msg->height * color_msg->step); + + cv::Mat color(color_msg->height, color_msg->width, CV_MAKETYPE(type, 3), + &color_msg->data[0], color_msg->step); + + int algorithm; + { + boost::lock_guard lock(config_mutex_); + algorithm = config_.debayer; + } + + if (algorithm == Debayer_EdgeAware || + algorithm == Debayer_EdgeAwareWeighted) + { + // These algorithms are not in OpenCV yet + if (raw_msg->encoding != enc::BAYER_GRBG8) + { + NODELET_WARN_THROTTLE(30, "Edge aware algorithms currently only support GRBG8 Bayer. " + "Falling back to bilinear interpolation."); + algorithm = Debayer_Bilinear; + } + else + { + if (algorithm == Debayer_EdgeAware) + debayerEdgeAware(bayer, color); + else + debayerEdgeAwareWeighted(bayer, color); + } + } + if (algorithm == Debayer_Bilinear || + algorithm == Debayer_VNG) + { + int code = -1; + if (raw_msg->encoding == enc::BAYER_RGGB8 || + raw_msg->encoding == enc::BAYER_RGGB16) + code = cv::COLOR_BayerBG2BGR; + else if (raw_msg->encoding == enc::BAYER_BGGR8 || + raw_msg->encoding == enc::BAYER_BGGR16) + code = cv::COLOR_BayerRG2BGR; + else if (raw_msg->encoding == enc::BAYER_GBRG8 || + raw_msg->encoding == enc::BAYER_GBRG16) + code = cv::COLOR_BayerGR2BGR; + else if (raw_msg->encoding == enc::BAYER_GRBG8 || + raw_msg->encoding == enc::BAYER_GRBG16) + code = cv::COLOR_BayerGB2BGR; + + if (algorithm == Debayer_VNG) + code += cv::COLOR_BayerBG2BGR_VNG - cv::COLOR_BayerBG2BGR; + + cv::cvtColor(bayer, color, code); + } + + pub_color_.publish(color_msg); + } + else if (raw_msg->encoding == enc::YUV422) + { + // Use cv_bridge to convert to BGR8 + sensor_msgs::ImagePtr color_msg; + try + { + color_msg = cv_bridge::toCvCopy(raw_msg, enc::BGR8)->toImageMsg(); + pub_color_.publish(color_msg); + } + catch (cv_bridge::Exception &e) + { + NODELET_WARN_THROTTLE(30, "cv_bridge conversion error: '%s'", e.what()); + } + } + else if (raw_msg->encoding == enc::TYPE_8UC3) + { + // 8UC3 does not specify a color encoding. Is it BGR, RGB, HSV, XYZ, LUV...? + NODELET_ERROR_THROTTLE(10, + "Raw image topic '%s' has ambiguous encoding '8UC3'. 
The " + "source should set the encoding to 'bgr8' or 'rgb8'.", + sub_raw_.getTopic().c_str()); + } + else + { + NODELET_ERROR_THROTTLE(10, "Raw image topic '%s' has unsupported encoding '%s'", + sub_raw_.getTopic().c_str(), raw_msg->encoding.c_str()); + } +} + +void DebayerNodelet::configCb(Config &config, uint32_t level) +{ + config_ = config; +} + +} // namespace image_proc + +// Register nodelet +#include +PLUGINLIB_EXPORT_CLASS( image_proc::DebayerNodelet, nodelet::Nodelet) diff --git a/future pkg/image_pipeline/image_proc/src/nodelets/edge_aware.cpp b/future pkg/image_pipeline/image_proc/src/nodelets/edge_aware.cpp new file mode 100644 index 0000000..c882c8d --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/nodelets/edge_aware.cpp @@ -0,0 +1,804 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include "edge_aware.h" + +#define AVG(a,b) (((int)(a) + (int)(b)) >> 1) +#define AVG3(a,b,c) (((int)(a) + (int)(b) + (int)(c)) / 3) +#define AVG4(a,b,c,d) (((int)(a) + (int)(b) + (int)(c) + (int)(d)) >> 2) +#define WAVG4(a,b,c,d,x,y) ( ( ((int)(a) + (int)(b)) * (int)(x) + ((int)(c) + (int)(d)) * (int)(y) ) / ( 2 * ((int)(x) + (int(y))) ) ) +using namespace std; + +namespace image_proc { + +void debayerEdgeAware(const cv::Mat& bayer, cv::Mat& color) +{ + unsigned width = bayer.cols; + unsigned height = bayer.rows; + unsigned rgb_line_step = color.step[0]; + unsigned rgb_line_skip = rgb_line_step - width * 3; + int bayer_line_step = bayer.step[0]; + int bayer_line_step2 = bayer_line_step * 2; + + unsigned char* rgb_buffer = color.data; + unsigned char* bayer_pixel = bayer.data; + unsigned yIdx, xIdx; + + int dh, dv; + + // first two pixel values for first two lines + // Bayer 0 1 2 + // 0 G r g + // line_step b g b + // line_step2 g r g + + rgb_buffer[3] = rgb_buffer[0] = bayer_pixel[1]; // red pixel + rgb_buffer[1] = bayer_pixel[0]; // green pixel + rgb_buffer[rgb_line_step + 2] = rgb_buffer[2] = bayer_pixel[bayer_line_step]; // blue; + + // Bayer 0 1 2 + // 0 g R g + // line_step b g b + // line_step2 g r g + //rgb_pixel[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG3 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 5] = rgb_buffer[5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + // BGBG line + // Bayer 0 1 2 + // 0 g r g + // line_step B g b + // line_step2 g r g + rgb_buffer[rgb_line_step + 3] = rgb_buffer[rgb_line_step ] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 1] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step + 1], bayer_pixel[bayer_line_step2]); + //rgb_pixel[rgb_line_step + 2] = bayer_pixel[line_step]; + + // pixel (1, 1) 0 1 2 + // 0 g r g + // line_step b G b + // line_step2 g r g + //rgb_pixel[rgb_line_step + 3] = AVG( bayer_pixel[1] , bayer_pixel[line_step2+1] ); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = AVG( bayer_pixel[line_step] , bayer_pixel[line_step+2] ); + + rgb_buffer += 6; + bayer_pixel += 2; + // rest of the first two lines + for (xIdx = 2; xIdx < width - 2; xIdx += 2, rgb_buffer += 6, bayer_pixel += 2) + { + // GRGR line + // Bayer -1 0 1 2 + // 0 r G r g + // line_step g b g b + // line_step2 r g r g + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[2] = bayer_pixel[bayer_line_step + 1]; + + // Bayer -1 0 1 2 + // 0 r g R g + // line_step g b g b + // line_step2 r g r g + rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG3 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 5] = rgb_buffer[5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + // BGBG line + // Bayer -1 0 1 2 + // 0 r g r g + // line_step g B g b + // line_step2 r g r g + rgb_buffer[rgb_line_step ] = AVG4 (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1], bayer_pixel[-1], bayer_pixel[bayer_line_step2 - 1]); + rgb_buffer[rgb_line_step + 1] = AVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + // Bayer -1 0 1 2 + // 0 r g r g + // line_step g b G b + // line_step2 r g r g + 
rgb_buffer[rgb_line_step + 3] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = AVG( bayer_pixel[line_step] , bayer_pixel[line_step+2] ); + } + + // last two pixel values for first two lines + // GRGR line + // Bayer -1 0 1 + // 0 r G r + // line_step g b g + // line_step2 r g r + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[rgb_line_step + 5] = rgb_buffer[rgb_line_step + 2] = rgb_buffer[5] = rgb_buffer[2] = bayer_pixel[bayer_line_step]; + + // Bayer -1 0 1 + // 0 r g R + // line_step g b g + // line_step2 r g r + rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG (bayer_pixel[0], bayer_pixel[bayer_line_step + 1]); + //rgb_pixel[5] = bayer_pixel[line_step]; + + // BGBG line + // Bayer -1 0 1 + // 0 r g r + // line_step g B g + // line_step2 r g r + rgb_buffer[rgb_line_step ] = AVG4 (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1], bayer_pixel[-1], bayer_pixel[bayer_line_step2 - 1]); + rgb_buffer[rgb_line_step + 1] = AVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + //rgb_pixel[rgb_line_step + 2] = bayer_pixel[line_step]; + + // Bayer -1 0 1 + // 0 r g r + // line_step g b G + // line_step2 r g r + rgb_buffer[rgb_line_step + 3] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = bayer_pixel[line_step]; + + bayer_pixel += bayer_line_step + 2; + rgb_buffer += rgb_line_step + 6 + rgb_line_skip; + // main processing + for (yIdx = 2; yIdx < height - 2; yIdx += 2) + { + // first two pixel values + // Bayer 0 1 2 + // -1 b g b + // 0 G r g + // line_step b g b + // line_step2 g r g + + rgb_buffer[3] = rgb_buffer[0] = bayer_pixel[1]; // red pixel + rgb_buffer[1] = bayer_pixel[0]; // green pixel + rgb_buffer[2] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[-bayer_line_step]); // blue; + + // Bayer 0 1 2 + // -1 b g b + // 0 g R g + // line_step b g b + // line_step2 g r g + //rgb_pixel[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG4 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1], bayer_pixel[1 - bayer_line_step]); + rgb_buffer[5] = AVG4 (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2], bayer_pixel[-bayer_line_step], bayer_pixel[2 - bayer_line_step]); + + // BGBG line + // Bayer 0 1 2 + // 0 g r g + // line_step B g b + // line_step2 g r g + rgb_buffer[rgb_line_step + 3] = rgb_buffer[rgb_line_step ] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 1] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step + 1], bayer_pixel[bayer_line_step2]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + // pixel (1, 1) 0 1 2 + // 0 g r g + // line_step b G b + // line_step2 g r g + //rgb_pixel[rgb_line_step + 3] = AVG( bayer_pixel[1] , bayer_pixel[line_step2+1] ); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + rgb_buffer[rgb_line_step + 5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + rgb_buffer += 6; + bayer_pixel += 2; + // continue with rest of the line + for (xIdx = 2; xIdx < width - 2; xIdx += 2, rgb_buffer += 6, bayer_pixel += 2) + { + // GRGR line + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r G r g + // line_step g b g b + // line_step2 r g r g + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = 
bayer_pixel[0]; + rgb_buffer[2] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[-bayer_line_step]); + + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g R g + // line_step g b g b + // line_step2 r g r g + + dh = abs (bayer_pixel[0] - bayer_pixel[2]); + dv = abs (bayer_pixel[-bayer_line_step + 1] - bayer_pixel[bayer_line_step + 1]); + + if (dh > dv) + rgb_buffer[4] = AVG (bayer_pixel[-bayer_line_step + 1], bayer_pixel[bayer_line_step + 1]); + else if (dv > dh) + rgb_buffer[4] = AVG (bayer_pixel[0], bayer_pixel[2]); + else + rgb_buffer[4] = AVG4 (bayer_pixel[-bayer_line_step + 1], bayer_pixel[bayer_line_step + 1], bayer_pixel[0], bayer_pixel[2]); + + rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[5] = AVG4 (bayer_pixel[-bayer_line_step], bayer_pixel[2 - bayer_line_step], bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + // BGBG line + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g r g + // line_step g B g b + // line_step2 r g r g + rgb_buffer[rgb_line_step ] = AVG4 (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1], bayer_pixel[-1], bayer_pixel[bayer_line_step2 - 1]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + dv = abs (bayer_pixel[0] - bayer_pixel[bayer_line_step2]); + dh = abs (bayer_pixel[bayer_line_step - 1] - bayer_pixel[bayer_line_step + 1]); + + if (dv > dh) + rgb_buffer[rgb_line_step + 1] = AVG (bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + else if (dh > dv) + rgb_buffer[rgb_line_step + 1] = AVG (bayer_pixel[0], bayer_pixel[bayer_line_step2]); + else + rgb_buffer[rgb_line_step + 1] = AVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g r g + // line_step g b G b + // line_step2 r g r g + rgb_buffer[rgb_line_step + 3] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + rgb_buffer[rgb_line_step + 5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + } + + // last two pixels of the line + // last two pixel values for first two lines + // GRGR line + // Bayer -1 0 1 + // 0 r G r + // line_step g b g + // line_step2 r g r + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[rgb_line_step + 5] = rgb_buffer[rgb_line_step + 2] = rgb_buffer[5] = rgb_buffer[2] = bayer_pixel[bayer_line_step]; + + // Bayer -1 0 1 + // 0 r g R + // line_step g b g + // line_step2 r g r + rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG (bayer_pixel[0], bayer_pixel[bayer_line_step + 1]); + //rgb_pixel[5] = bayer_pixel[line_step]; + + // BGBG line + // Bayer -1 0 1 + // 0 r g r + // line_step g B g + // line_step2 r g r + rgb_buffer[rgb_line_step ] = AVG4 (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1], bayer_pixel[-1], bayer_pixel[bayer_line_step2 - 1]); + rgb_buffer[rgb_line_step + 1] = AVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + //rgb_pixel[rgb_line_step + 2] = bayer_pixel[line_step]; + + // Bayer -1 0 1 + // 0 r g r + // line_step g b G + // line_step2 r g r + rgb_buffer[rgb_line_step + 3] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = bayer_pixel[line_step]; + + bayer_pixel += bayer_line_step + 2; + rgb_buffer += rgb_line_step + 6 + rgb_line_skip; + } + + //last two lines + // Bayer 0 
1 2 + // -1 b g b + // 0 G r g + // line_step b g b + + rgb_buffer[rgb_line_step + 3] = rgb_buffer[rgb_line_step ] = rgb_buffer[3] = rgb_buffer[0] = bayer_pixel[1]; // red pixel + rgb_buffer[1] = bayer_pixel[0]; // green pixel + rgb_buffer[rgb_line_step + 2] = rgb_buffer[2] = bayer_pixel[bayer_line_step]; // blue; + + // Bayer 0 1 2 + // -1 b g b + // 0 g R g + // line_step b g b + //rgb_pixel[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG4 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1], bayer_pixel[1 - bayer_line_step]); + rgb_buffer[5] = AVG4 (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2], bayer_pixel[-bayer_line_step], bayer_pixel[2 - bayer_line_step]); + + // BGBG line + // Bayer 0 1 2 + // -1 b g b + // 0 g r g + // line_step B g b + //rgb_pixel[rgb_line_step ] = bayer_pixel[1]; + rgb_buffer[rgb_line_step + 1] = AVG (bayer_pixel[0], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + // Bayer 0 1 2 + // -1 b g b + // 0 g r g + // line_step b G b + //rgb_pixel[rgb_line_step + 3] = AVG( bayer_pixel[1] , bayer_pixel[line_step2+1] ); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + rgb_buffer[rgb_line_step + 5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + rgb_buffer += 6; + bayer_pixel += 2; + // rest of the last two lines + for (xIdx = 2; xIdx < width - 2; xIdx += 2, rgb_buffer += 6, bayer_pixel += 2) + { + // GRGR line + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r G r g + // line_step g b g b + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[2] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[-bayer_line_step]); + + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g R g + // line_step g b g b + rgb_buffer[rgb_line_step + 3] = rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG4 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1], bayer_pixel[1 - bayer_line_step]); + rgb_buffer[5] = AVG4 (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2], bayer_pixel[-bayer_line_step], bayer_pixel[-bayer_line_step + 2]); + + // BGBG line + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g r g + // line_step g B g b + rgb_buffer[rgb_line_step ] = AVG (bayer_pixel[-1], bayer_pixel[1]); + rgb_buffer[rgb_line_step + 1] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g r g + // line_step g b G b + //rgb_pixel[rgb_line_step + 3] = bayer_pixel[1]; + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + rgb_buffer[rgb_line_step + 5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + } + + // last two pixel values for first two lines + // GRGR line + // Bayer -1 0 1 + // -1 g b g + // 0 r G r + // line_step g b g + rgb_buffer[rgb_line_step ] = rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[5] = rgb_buffer[2] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[-bayer_line_step]); + + // Bayer -1 0 1 + // -1 g b g + // 0 r g R + // line_step g b g + rgb_buffer[rgb_line_step + 3] = rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step + 1], bayer_pixel[-bayer_line_step + 1]); + //rgb_pixel[5] = AVG( bayer_pixel[line_step], bayer_pixel[-line_step] ); + + // BGBG line + // Bayer -1 0 1 + // -1 g b g + // 0 r g r + // line_step g 
B g + //rgb_pixel[rgb_line_step ] = AVG2( bayer_pixel[-1], bayer_pixel[1] ); + rgb_buffer[rgb_line_step + 1] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 5] = rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + // Bayer -1 0 1 + // -1 g b g + // 0 r g r + // line_step g b G + //rgb_pixel[rgb_line_step + 3] = bayer_pixel[1]; + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = bayer_pixel[line_step]; +} + +void debayerEdgeAwareWeighted(const cv::Mat& bayer, cv::Mat& color) +{ + unsigned width = bayer.cols; + unsigned height = bayer.rows; + unsigned rgb_line_step = color.step[0]; + unsigned rgb_line_skip = rgb_line_step - width * 3; + int bayer_line_step = bayer.step[0]; + int bayer_line_step2 = bayer_line_step * 2; + + unsigned char* rgb_buffer = color.data; + unsigned char* bayer_pixel = bayer.data; + unsigned yIdx, xIdx; + + int dh, dv; + + // first two pixel values for first two lines + // Bayer 0 1 2 + // 0 G r g + // line_step b g b + // line_step2 g r g + + rgb_buffer[3] = rgb_buffer[0] = bayer_pixel[1]; // red pixel + rgb_buffer[1] = bayer_pixel[0]; // green pixel + rgb_buffer[rgb_line_step + 2] = rgb_buffer[2] = bayer_pixel[bayer_line_step]; // blue; + + // Bayer 0 1 2 + // 0 g R g + // line_step b g b + // line_step2 g r g + //rgb_pixel[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG3 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 5] = rgb_buffer[5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + // BGBG line + // Bayer 0 1 2 + // 0 g r g + // line_step B g b + // line_step2 g r g + rgb_buffer[rgb_line_step + 3] = rgb_buffer[rgb_line_step ] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 1] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step + 1], bayer_pixel[bayer_line_step2]); + //rgb_pixel[rgb_line_step + 2] = bayer_pixel[line_step]; + + // pixel (1, 1) 0 1 2 + // 0 g r g + // line_step b G b + // line_step2 g r g + //rgb_pixel[rgb_line_step + 3] = AVG( bayer_pixel[1] , bayer_pixel[line_step2+1] ); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = AVG( bayer_pixel[line_step] , bayer_pixel[line_step+2] ); + + rgb_buffer += 6; + bayer_pixel += 2; + // rest of the first two lines + for (xIdx = 2; xIdx < width - 2; xIdx += 2, rgb_buffer += 6, bayer_pixel += 2) + { + // GRGR line + // Bayer -1 0 1 2 + // 0 r G r g + // line_step g b g b + // line_step2 r g r g + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[2] = bayer_pixel[bayer_line_step + 1]; + + // Bayer -1 0 1 2 + // 0 r g R g + // line_step g b g b + // line_step2 r g r g + rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG3 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 5] = rgb_buffer[5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + // BGBG line + // Bayer -1 0 1 2 + // 0 r g r g + // line_step g B g b + // line_step2 r g r g + rgb_buffer[rgb_line_step ] = AVG4 (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1], bayer_pixel[-1], bayer_pixel[bayer_line_step2 - 1]); + rgb_buffer[rgb_line_step + 1] = AVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + // 
Bayer -1 0 1 2 + // 0 r g r g + // line_step g b G b + // line_step2 r g r g + rgb_buffer[rgb_line_step + 3] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = AVG( bayer_pixel[line_step] , bayer_pixel[line_step+2] ); + } + + // last two pixel values for first two lines + // GRGR line + // Bayer -1 0 1 + // 0 r G r + // line_step g b g + // line_step2 r g r + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[rgb_line_step + 5] = rgb_buffer[rgb_line_step + 2] = rgb_buffer[5] = rgb_buffer[2] = bayer_pixel[bayer_line_step]; + + // Bayer -1 0 1 + // 0 r g R + // line_step g b g + // line_step2 r g r + rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG (bayer_pixel[0], bayer_pixel[bayer_line_step + 1]); + //rgb_pixel[5] = bayer_pixel[line_step]; + + // BGBG line + // Bayer -1 0 1 + // 0 r g r + // line_step g B g + // line_step2 r g r + rgb_buffer[rgb_line_step ] = AVG4 (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1], bayer_pixel[-1], bayer_pixel[bayer_line_step2 - 1]); + rgb_buffer[rgb_line_step + 1] = AVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + //rgb_pixel[rgb_line_step + 2] = bayer_pixel[line_step]; + + // Bayer -1 0 1 + // 0 r g r + // line_step g b G + // line_step2 r g r + rgb_buffer[rgb_line_step + 3] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = bayer_pixel[line_step]; + + bayer_pixel += bayer_line_step + 2; + rgb_buffer += rgb_line_step + 6 + rgb_line_skip; + // main processing + for (yIdx = 2; yIdx < height - 2; yIdx += 2) + { + // first two pixel values + // Bayer 0 1 2 + // -1 b g b + // 0 G r g + // line_step b g b + // line_step2 g r g + + rgb_buffer[3] = rgb_buffer[0] = bayer_pixel[1]; // red pixel + rgb_buffer[1] = bayer_pixel[0]; // green pixel + rgb_buffer[2] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[-bayer_line_step]); // blue; + + // Bayer 0 1 2 + // -1 b g b + // 0 g R g + // line_step b g b + // line_step2 g r g + //rgb_pixel[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG4 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1], bayer_pixel[1 - bayer_line_step]); + rgb_buffer[5] = AVG4 (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2], bayer_pixel[-bayer_line_step], bayer_pixel[2 - bayer_line_step]); + + // BGBG line + // Bayer 0 1 2 + // 0 g r g + // line_step B g b + // line_step2 g r g + rgb_buffer[rgb_line_step + 3] = rgb_buffer[rgb_line_step ] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 1] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step + 1], bayer_pixel[bayer_line_step2]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + // pixel (1, 1) 0 1 2 + // 0 g r g + // line_step b G b + // line_step2 g r g + //rgb_pixel[rgb_line_step + 3] = AVG( bayer_pixel[1] , bayer_pixel[line_step2+1] ); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + rgb_buffer[rgb_line_step + 5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + rgb_buffer += 6; + bayer_pixel += 2; + // continue with rest of the line + for (xIdx = 2; xIdx < width - 2; xIdx += 2, rgb_buffer += 6, bayer_pixel += 2) + { + // GRGR line + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r G r g + // line_step g b g b + // line_step2 r g 
r g + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[2] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[-bayer_line_step]); + + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g R g + // line_step g b g b + // line_step2 r g r g + + dh = abs (bayer_pixel[0] - bayer_pixel[2]); + dv = abs (bayer_pixel[-bayer_line_step + 1] - bayer_pixel[bayer_line_step + 1]); + + if (dv == 0 && dh == 0) + rgb_buffer[4] = AVG4 (bayer_pixel[1 - bayer_line_step], bayer_pixel[1 + bayer_line_step], bayer_pixel[0], bayer_pixel[2]); + else + rgb_buffer[4] = WAVG4 (bayer_pixel[1 - bayer_line_step], bayer_pixel[1 + bayer_line_step], bayer_pixel[0], bayer_pixel[2], dh, dv); + rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[5] = AVG4 (bayer_pixel[-bayer_line_step], bayer_pixel[2 - bayer_line_step], bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + // BGBG line + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g r g + // line_step g B g b + // line_step2 r g r g + rgb_buffer[rgb_line_step ] = AVG4 (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1], bayer_pixel[-1], bayer_pixel[bayer_line_step2 - 1]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + dv = abs (bayer_pixel[0] - bayer_pixel[bayer_line_step2]); + dh = abs (bayer_pixel[bayer_line_step - 1] - bayer_pixel[bayer_line_step + 1]); + + if (dv == 0 && dh == 0) + rgb_buffer[rgb_line_step + 1] = AVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + else + rgb_buffer[rgb_line_step + 1] = WAVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1], dh, dv); + + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g r g + // line_step g b G b + // line_step2 r g r g + rgb_buffer[rgb_line_step + 3] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + rgb_buffer[rgb_line_step + 5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + } + + // last two pixels of the line + // last two pixel values for first two lines + // GRGR line + // Bayer -1 0 1 + // 0 r G r + // line_step g b g + // line_step2 r g r + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[rgb_line_step + 5] = rgb_buffer[rgb_line_step + 2] = rgb_buffer[5] = rgb_buffer[2] = bayer_pixel[bayer_line_step]; + + // Bayer -1 0 1 + // 0 r g R + // line_step g b g + // line_step2 r g r + rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG (bayer_pixel[0], bayer_pixel[bayer_line_step + 1]); + //rgb_pixel[5] = bayer_pixel[line_step]; + + // BGBG line + // Bayer -1 0 1 + // 0 r g r + // line_step g B g + // line_step2 r g r + rgb_buffer[rgb_line_step ] = AVG4 (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1], bayer_pixel[-1], bayer_pixel[bayer_line_step2 - 1]); + rgb_buffer[rgb_line_step + 1] = AVG4 (bayer_pixel[0], bayer_pixel[bayer_line_step2], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + //rgb_pixel[rgb_line_step + 2] = bayer_pixel[line_step]; + + // Bayer -1 0 1 + // 0 r g r + // line_step g b G + // line_step2 r g r + rgb_buffer[rgb_line_step + 3] = AVG (bayer_pixel[1], bayer_pixel[bayer_line_step2 + 1]); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = bayer_pixel[line_step]; + + bayer_pixel += bayer_line_step + 2; + rgb_buffer += rgb_line_step + 6 + rgb_line_skip; + } + + //last two lines 
+ // Bayer 0 1 2 + // -1 b g b + // 0 G r g + // line_step b g b + + rgb_buffer[rgb_line_step + 3] = rgb_buffer[rgb_line_step ] = rgb_buffer[3] = rgb_buffer[0] = bayer_pixel[1]; // red pixel + rgb_buffer[1] = bayer_pixel[0]; // green pixel + rgb_buffer[rgb_line_step + 2] = rgb_buffer[2] = bayer_pixel[bayer_line_step]; // blue; + + // Bayer 0 1 2 + // -1 b g b + // 0 g R g + // line_step b g b + //rgb_pixel[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG4 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1], bayer_pixel[1 - bayer_line_step]); + rgb_buffer[5] = AVG4 (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2], bayer_pixel[-bayer_line_step], bayer_pixel[2 - bayer_line_step]); + + // BGBG line + // Bayer 0 1 2 + // -1 b g b + // 0 g r g + // line_step B g b + //rgb_pixel[rgb_line_step ] = bayer_pixel[1]; + rgb_buffer[rgb_line_step + 1] = AVG (bayer_pixel[0], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + // Bayer 0 1 2 + // -1 b g b + // 0 g r g + // line_step b G b + //rgb_pixel[rgb_line_step + 3] = AVG( bayer_pixel[1] , bayer_pixel[line_step2+1] ); + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + rgb_buffer[rgb_line_step + 5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + + rgb_buffer += 6; + bayer_pixel += 2; + // rest of the last two lines + for (xIdx = 2; xIdx < width - 2; xIdx += 2, rgb_buffer += 6, bayer_pixel += 2) + { + // GRGR line + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r G r g + // line_step g b g b + rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[2] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[-bayer_line_step]); + + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g R g + // line_step g b g b + rgb_buffer[rgb_line_step + 3] = rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG4 (bayer_pixel[0], bayer_pixel[2], bayer_pixel[bayer_line_step + 1], bayer_pixel[1 - bayer_line_step]); + rgb_buffer[5] = AVG4 (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2], bayer_pixel[-bayer_line_step], bayer_pixel[-bayer_line_step + 2]); + + // BGBG line + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g r g + // line_step g B g b + rgb_buffer[rgb_line_step ] = AVG (bayer_pixel[-1], bayer_pixel[1]); + rgb_buffer[rgb_line_step + 1] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + + // Bayer -1 0 1 2 + // -1 g b g b + // 0 r g r g + // line_step g b G b + //rgb_pixel[rgb_line_step + 3] = bayer_pixel[1]; + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + rgb_buffer[rgb_line_step + 5] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[bayer_line_step + 2]); + } + + // last two pixel values for first two lines + // GRGR line + // Bayer -1 0 1 + // -1 g b g + // 0 r G r + // line_step g b g + rgb_buffer[rgb_line_step ] = rgb_buffer[0] = AVG (bayer_pixel[1], bayer_pixel[-1]); + rgb_buffer[1] = bayer_pixel[0]; + rgb_buffer[5] = rgb_buffer[2] = AVG (bayer_pixel[bayer_line_step], bayer_pixel[-bayer_line_step]); + + // Bayer -1 0 1 + // -1 g b g + // 0 r g R + // line_step g b g + rgb_buffer[rgb_line_step + 3] = rgb_buffer[3] = bayer_pixel[1]; + rgb_buffer[4] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step + 1], bayer_pixel[-bayer_line_step + 1]); + //rgb_pixel[5] = AVG( bayer_pixel[line_step], bayer_pixel[-line_step] ); + + // BGBG line + // Bayer -1 0 1 + // -1 g b g + // 0 r g r + 
// line_step g B g + //rgb_pixel[rgb_line_step ] = AVG2( bayer_pixel[-1], bayer_pixel[1] ); + rgb_buffer[rgb_line_step + 1] = AVG3 (bayer_pixel[0], bayer_pixel[bayer_line_step - 1], bayer_pixel[bayer_line_step + 1]); + rgb_buffer[rgb_line_step + 5] = rgb_buffer[rgb_line_step + 2] = bayer_pixel[bayer_line_step]; + + // Bayer -1 0 1 + // -1 g b g + // 0 r g r + // line_step g b G + //rgb_pixel[rgb_line_step + 3] = bayer_pixel[1]; + rgb_buffer[rgb_line_step + 4] = bayer_pixel[bayer_line_step + 1]; + //rgb_pixel[rgb_line_step + 5] = bayer_pixel[line_step]; + +} + +} // namespace image_proc diff --git a/future pkg/image_pipeline/image_proc/src/nodelets/edge_aware.h b/future pkg/image_pipeline/image_proc/src/nodelets/edge_aware.h new file mode 100644 index 0000000..dbc4ec1 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/nodelets/edge_aware.h @@ -0,0 +1,49 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#ifndef IMAGE_PROC_EDGE_AWARE +#define IMAGE_PROC_EDGE_AWARE + +#include + +// Edge-aware debayering algorithms, intended for eventual inclusion in OpenCV. + +namespace image_proc { + +void debayerEdgeAware(const cv::Mat& bayer, cv::Mat& color); + +void debayerEdgeAwareWeighted(const cv::Mat& bayer, cv::Mat& color); + +} // namespace image_proc + +#endif diff --git a/future pkg/image_pipeline/image_proc/src/nodelets/rectify.cpp b/future pkg/image_pipeline/image_proc/src/nodelets/rectify.cpp new file mode 100644 index 0000000..dd2490c --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/nodelets/rectify.cpp @@ -0,0 +1,170 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. 
+* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#if ((BOOST_VERSION / 100) % 1000) >= 53 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include + +namespace image_proc { + +class RectifyNodelet : public nodelet::Nodelet +{ + // ROS communication + boost::shared_ptr it_; + image_transport::CameraSubscriber sub_camera_; + int queue_size_; + + boost::mutex connect_mutex_; + image_transport::Publisher pub_rect_; + + // Dynamic reconfigure + boost::recursive_mutex config_mutex_; + typedef image_proc::RectifyConfig Config; + typedef dynamic_reconfigure::Server ReconfigureServer; + boost::shared_ptr reconfigure_server_; + Config config_; + + // Processing state (note: only safe because we're using single-threaded NodeHandle!) 
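+  // The camera model below is updated in imageCb() without taking a lock; with
+  // the default single-threaded nodelet callback queue only one callback runs
+  // at a time, so no extra synchronization is needed. Reusing one
+  // PinholeCameraModel instance also lets it keep its internally cached
+  // rectification maps for as long as the incoming CameraInfo is unchanged.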
+ image_geometry::PinholeCameraModel model_; + + virtual void onInit(); + + void connectCb(); + + void imageCb(const sensor_msgs::ImageConstPtr& image_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg); + + void configCb(Config &config, uint32_t level); +}; + +void RectifyNodelet::onInit() +{ + ros::NodeHandle &nh = getNodeHandle(); + ros::NodeHandle &private_nh = getPrivateNodeHandle(); + it_.reset(new image_transport::ImageTransport(nh)); + + // Read parameters + private_nh.param("queue_size", queue_size_, 5); + + // Set up dynamic reconfigure + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, private_nh)); + ReconfigureServer::CallbackType f = boost::bind(&RectifyNodelet::configCb, this, _1, _2); + reconfigure_server_->setCallback(f); + + // Monitor whether anyone is subscribed to the output + image_transport::SubscriberStatusCallback connect_cb = boost::bind(&RectifyNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_rect_ + boost::lock_guard lock(connect_mutex_); + pub_rect_ = it_->advertise("image_rect", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void RectifyNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_rect_.getNumSubscribers() == 0) + sub_camera_.shutdown(); + else if (!sub_camera_) + { + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_camera_ = it_->subscribeCamera("image_mono", queue_size_, &RectifyNodelet::imageCb, this, hints); + } +} + +void RectifyNodelet::imageCb(const sensor_msgs::ImageConstPtr& image_msg, + const sensor_msgs::CameraInfoConstPtr& info_msg) +{ + // Verify camera is actually calibrated + if (info_msg->K[0] == 0.0) { + NODELET_ERROR_THROTTLE(30, "Rectified topic '%s' requested but camera publishing '%s' " + "is uncalibrated", pub_rect_.getTopic().c_str(), + sub_camera_.getInfoTopic().c_str()); + return; + } + + // If zero distortion, just pass the message along + bool zero_distortion = true; + for (size_t i = 0; i < info_msg->D.size(); ++i) + { + if (info_msg->D[i] != 0.0) + { + zero_distortion = false; + break; + } + } + // This will be true if D is empty/zero sized + if (zero_distortion) + { + pub_rect_.publish(image_msg); + return; + } + + // Update the camera model + model_.fromCameraInfo(info_msg); + + // Create cv::Mat views onto both buffers + const cv::Mat image = cv_bridge::toCvShare(image_msg)->image; + cv::Mat rect; + + // Rectify and publish + int interpolation; + { + boost::lock_guard lock(config_mutex_); + interpolation = config_.interpolation; + } + model_.rectifyImage(image, rect, interpolation); + + // Allocate new rectified image message + sensor_msgs::ImagePtr rect_msg = cv_bridge::CvImage(image_msg->header, image_msg->encoding, rect).toImageMsg(); + pub_rect_.publish(rect_msg); +} + +void RectifyNodelet::configCb(Config &config, uint32_t level) +{ + config_ = config; +} + +} // namespace image_proc + +// Register nodelet +#include +PLUGINLIB_EXPORT_CLASS( image_proc::RectifyNodelet, nodelet::Nodelet) diff --git a/future pkg/image_pipeline/image_proc/src/nodelets/resize.cpp b/future pkg/image_pipeline/image_proc/src/nodelets/resize.cpp new file mode 100644 index 0000000..44fc9a3 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/nodelets/resize.cpp @@ -0,0 +1,174 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2017, Kentaro Wada. 
+* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Kentaro Wada nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#include +#include +#include +#include +#include +#include + +#include "image_proc/ResizeConfig.h" + +namespace image_proc +{ + +class ResizeNodelet : public nodelet_topic_tools::NodeletLazy +{ +protected: + // ROS communication + ros::Publisher pub_image_; + ros::Publisher pub_info_; + ros::Subscriber sub_info_; + ros::Subscriber sub_image_; + + // Dynamic reconfigure + boost::recursive_mutex config_mutex_; + typedef image_proc::ResizeConfig Config; + typedef dynamic_reconfigure::Server ReconfigureServer; + boost::shared_ptr reconfigure_server_; + Config config_; + + virtual void onInit(); + virtual void subscribe(); + virtual void unsubscribe(); + + void imageCb(const sensor_msgs::ImageConstPtr& image_msg); + void infoCb(const sensor_msgs::CameraInfoConstPtr& info_msg); + + void configCb(Config &config, uint32_t level); +}; + +void ResizeNodelet::onInit() +{ + nodelet_topic_tools::NodeletLazy::onInit(); + + // Set up dynamic reconfigure + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, *pnh_)); + ReconfigureServer::CallbackType f = boost::bind(&ResizeNodelet::configCb, this, _1, _2); + reconfigure_server_->setCallback(f); + + pub_info_ = advertise(*pnh_, "camera_info", 1); + pub_image_ = advertise(*pnh_, "image", 1); + + onInitPostProcess(); +} + +void ResizeNodelet::configCb(Config &config, uint32_t level) +{ + config_ = config; +} + +void ResizeNodelet::subscribe() +{ + sub_info_ = nh_->subscribe("camera_info", 1, &ResizeNodelet::infoCb, this); + sub_image_ = nh_->subscribe("image", 1, &ResizeNodelet::imageCb, this); +} + +void ResizeNodelet::unsubscribe() +{ + sub_info_.shutdown(); + sub_image_.shutdown(); +} + +void ResizeNodelet::infoCb(const sensor_msgs::CameraInfoConstPtr& info_msg) +{ + Config config; + { + boost::lock_guard lock(config_mutex_); + config = config_; + } + + sensor_msgs::CameraInfo dst_info_msg = *info_msg; + + double scale_y; + double scale_x; + if (config.use_scale) + { + 
scale_y = config.scale_height; + scale_x = config.scale_width; + dst_info_msg.height = static_cast(info_msg->height * config.scale_height); + dst_info_msg.width = static_cast(info_msg->width * config.scale_width); + } + else + { + scale_y = static_cast(config.height) / info_msg->height; + scale_x = static_cast(config.width) / info_msg->width; + dst_info_msg.height = config.height; + dst_info_msg.width = config.width; + } + + dst_info_msg.K[0] = dst_info_msg.K[0] * scale_x; // fx + dst_info_msg.K[2] = dst_info_msg.K[2] * scale_x; // cx + dst_info_msg.K[4] = dst_info_msg.K[4] * scale_y; // fy + dst_info_msg.K[5] = dst_info_msg.K[5] * scale_y; // cy + + dst_info_msg.P[0] = dst_info_msg.P[0] * scale_x; // fx + dst_info_msg.P[2] = dst_info_msg.P[2] * scale_x; // cx + dst_info_msg.P[3] = dst_info_msg.P[3] * scale_x; // T + dst_info_msg.P[5] = dst_info_msg.P[5] * scale_y; // fy + dst_info_msg.P[6] = dst_info_msg.P[6] * scale_y; // cy + + pub_info_.publish(dst_info_msg); +} + +void ResizeNodelet::imageCb(const sensor_msgs::ImageConstPtr& image_msg) +{ + Config config; + { + boost::lock_guard lock(config_mutex_); + config = config_; + } + + cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(image_msg); + + if (config.use_scale) + { + cv::resize(cv_ptr->image, cv_ptr->image, cv::Size(0, 0), + config.scale_width, config.scale_height, config.interpolation); + } + else + { + int height = config.height == -1 ? image_msg->height : config.height; + int width = config.width == -1 ? image_msg->width : config.width; + cv::resize(cv_ptr->image, cv_ptr->image, cv::Size(width, height), 0, 0, config.interpolation); + } + + pub_image_.publish(cv_ptr->toImageMsg()); +} + +} // namespace image_proc + +#include +PLUGINLIB_EXPORT_CLASS(image_proc::ResizeNodelet, nodelet::Nodelet) diff --git a/future pkg/image_pipeline/image_proc/src/nodes/image_proc.cpp b/future pkg/image_pipeline/image_proc/src/nodes/image_proc.cpp new file mode 100644 index 0000000..52612ab --- /dev/null +++ b/future pkg/image_pipeline/image_proc/src/nodes/image_proc.cpp @@ -0,0 +1,97 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ + +#include +#include +#include + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "image_proc"); + + // Check for common user errors + if (ros::names::remap("camera") != "camera") + { + ROS_WARN("Remapping 'camera' has no effect! Start image_proc in the " + "camera namespace instead.\nExample command-line usage:\n" + "\t$ ROS_NAMESPACE=%s rosrun image_proc image_proc", + ros::names::remap("camera").c_str()); + } + if (ros::this_node::getNamespace() == "/") + { + ROS_WARN("Started in the global namespace! This is probably wrong. Start image_proc " + "in the camera namespace.\nExample command-line usage:\n" + "\t$ ROS_NAMESPACE=my_camera rosrun image_proc image_proc"); + } + + // Shared parameters to be propagated to nodelet private namespaces + ros::NodeHandle private_nh("~"); + XmlRpc::XmlRpcValue shared_params; + int queue_size; + if (private_nh.getParam("queue_size", queue_size)) + shared_params["queue_size"] = queue_size; + + nodelet::Loader manager(false); // Don't bring up the manager ROS API + nodelet::M_string remappings; + nodelet::V_string my_argv; + + // Debayer nodelet, image_raw -> image_mono, image_color + std::string debayer_name = ros::this_node::getName() + "_debayer"; + manager.load(debayer_name, "image_proc/debayer", remappings, my_argv); + + // Rectify nodelet, image_mono -> image_rect + std::string rectify_mono_name = ros::this_node::getName() + "_rectify_mono"; + if (shared_params.valid()) + ros::param::set(rectify_mono_name, shared_params); + manager.load(rectify_mono_name, "image_proc/rectify", remappings, my_argv); + + // Rectify nodelet, image_color -> image_rect_color + // NOTE: Explicitly resolve any global remappings here, so they don't get hidden. 
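+  // The color pipeline reuses the same rectify plugin and is pointed at the
+  // color topics by remapping its 'image_mono' input and 'image_rect' output.
+  // Passing the target names through ros::names::resolve() first applies any
+  // remapping the user gave on the command line (e.g. of image_color), so it
+  // is not masked by the nodelet-local remapping table built here.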
+ remappings["image_mono"] = ros::names::resolve("image_color"); + remappings["image_rect"] = ros::names::resolve("image_rect_color"); + std::string rectify_color_name = ros::this_node::getName() + "_rectify_color"; + if (shared_params.valid()) + ros::param::set(rectify_color_name, shared_params); + manager.load(rectify_color_name, "image_proc/rectify", remappings, my_argv); + + // Check for only the original camera topics + ros::V_string topics; + topics.push_back(ros::names::resolve("image_raw")); + topics.push_back(ros::names::resolve("camera_info")); + image_proc::AdvertisementChecker check_inputs(ros::NodeHandle(), ros::this_node::getName()); + check_inputs.start(topics, 60.0); + + ros::spin(); + return 0; +} diff --git a/future pkg/image_pipeline/image_proc/test/CMakeLists.txt b/future pkg/image_pipeline/image_proc/test/CMakeLists.txt new file mode 100644 index 0000000..52fa285 --- /dev/null +++ b/future pkg/image_pipeline/image_proc/test/CMakeLists.txt @@ -0,0 +1,5 @@ +find_package(rostest REQUIRED) +#catkin_add_gtest(image_proc_rostest rostest.cpp) +#target_link_libraries(image_proc_rostest ${catkin_LIBRARIES} ${Boost_LIBRARIES}) +add_rostest_gtest(image_proc_test_rectify test_rectify.xml test_rectify.cpp) +target_link_libraries(image_proc_test_rectify ${catkin_LIBRARIES}) diff --git a/future pkg/image_pipeline/image_proc/test/rostest.cpp b/future pkg/image_pipeline/image_proc/test/rostest.cpp new file mode 100644 index 0000000..b389fdb --- /dev/null +++ b/future pkg/image_pipeline/image_proc/test/rostest.cpp @@ -0,0 +1,99 @@ +#include +#include +#include +#include +#include +#include + +#include + +class ImageProcTest : public testing::Test +{ +protected: + virtual void SetUp() + { + ros::NodeHandle local_nh("~"); + + // Determine topic names + std::string camera_ns = nh.resolveName("camera") + "/"; + if (camera_ns == "/camera") + throw "Must remap 'camera' to the camera namespace."; + topic_raw = camera_ns + "image_raw"; + topic_mono = camera_ns + "image_mono"; + topic_rect = camera_ns + "image_rect"; + topic_color = camera_ns + "image_color"; + topic_rect_color = camera_ns + "image_rect_color"; + + // Load raw image and cam info + /// @todo Make these cmd-line args instead? 
+ std::string raw_image_file, cam_info_file; + if (!local_nh.getParam("raw_image_file", raw_image_file)) + throw "Must set parameter ~raw_image_file."; + if (!local_nh.getParam("camera_info_file", cam_info_file)) + throw "Must set parameter ~camera_info_file."; + + /// @todo Test variety of encodings for raw image (bayer, mono, color) + cv::Mat img = cv::imread(raw_image_file, 0); + raw_image = cv_bridge::CvImage(std_msgs::Header(), "mono8", img).toImageMsg(); + std::string cam_name; + if (!camera_calibration_parsers::readCalibration(cam_info_file, cam_name, cam_info)) + throw "Failed to read camera info file."; + + // Create raw camera publisher + image_transport::ImageTransport it(nh); + cam_pub = it.advertiseCamera(topic_raw, 1); + + // Wait for image_proc to be operational + ros::master::V_TopicInfo topics; + while (true) { + if (ros::master::getTopics(topics)) { + BOOST_FOREACH(ros::master::TopicInfo& topic, topics) { + if (topic.name == topic_rect_color) + return; + } + } + ros::Duration(0.5).sleep(); + } + } + + ros::NodeHandle nh; + std::string topic_raw; + std::string topic_mono; + std::string topic_rect; + std::string topic_color; + std::string topic_rect_color; + + sensor_msgs::ImagePtr raw_image; + sensor_msgs::CameraInfo cam_info; + image_transport::CameraPublisher cam_pub; + + void publishRaw() + { + cam_pub.publish(*raw_image, cam_info); + } +}; + +void callback(const sensor_msgs::ImageConstPtr& msg) +{ + ROS_FATAL("Got an image"); + ros::shutdown(); +} + +TEST_F(ImageProcTest, monoSubscription) +{ + ROS_INFO("In test. Subscribing."); + ros::Subscriber mono_sub = nh.subscribe(topic_mono, 1, callback); + ROS_INFO("Publishing."); + publishRaw(); + + ROS_INFO("Spinning."); + ros::spin(); + ROS_INFO("Done."); +} + +int main(int argc, char** argv) +{ + ros::init(argc, argv, "imageproc_rostest"); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/future pkg/image_pipeline/image_proc/test/test_bayer.xml b/future pkg/image_pipeline/image_proc/test/test_bayer.xml new file mode 100644 index 0000000..a6d2a1a --- /dev/null +++ b/future pkg/image_pipeline/image_proc/test/test_bayer.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/future pkg/image_pipeline/image_proc/test/test_rectify.cpp b/future pkg/image_pipeline/image_proc/test/test_rectify.cpp new file mode 100644 index 0000000..f03832d --- /dev/null +++ b/future pkg/image_pipeline/image_proc/test/test_rectify.cpp @@ -0,0 +1,205 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +class ImageProcRectifyTest : public testing::Test +{ +protected: + virtual void SetUp() + { + // Determine topic names + std::string camera_ns = nh_.resolveName("camera") + "/"; + if (camera_ns == "/camera") + throw "Must remap 'camera' to the camera namespace."; + topic_raw_ = camera_ns + "image_raw"; + topic_mono_ = camera_ns + "image_mono"; + topic_rect_ = camera_ns + "image_rect"; + topic_color_ = camera_ns + "image_color"; + topic_rect_color_ = camera_ns + "image_rect_color"; + + // Taken from vision_opencv/image_geometry/test/utest.cpp + double D[] = {-0.363528858080088, 0.16117037733986861, -8.1109585007538829e-05, -0.00044776712298447841, 0.0}; + double K[] = {430.15433020105519, 0.0, 311.71339830549732, + 0.0, 430.60920415473657, 221.06824942698509, + 0.0, 0.0, 1.0}; + double R[] = {0.99806560714807102, 0.0068562422224214027, 0.061790256276695904, + -0.0067522959054715113, 0.99997541519165112, -0.0018909025066874664, + -0.061801701660692349, 0.0014700186639396652, 
0.99808736527268516}; + double P[] = {295.53402059708782, 0.0, 285.55760765075684, 0.0, + 0.0, 295.53402059708782, 223.29617881774902, 0.0, + 0.0, 0.0, 1.0, 0.0}; + + cam_info_.header.frame_id = "tf_frame"; + cam_info_.height = 480; + cam_info_.width = 640; + // No ROI + cam_info_.D.resize(5); + std::copy(D, D+5, cam_info_.D.begin()); + std::copy(K, K+9, cam_info_.K.begin()); + std::copy(R, R+9, cam_info_.R.begin()); + std::copy(P, P+12, cam_info_.P.begin()); + cam_info_.distortion_model = sensor_msgs::distortion_models::PLUMB_BOB; + + distorted_image_ = cv::Mat(cv::Size(cam_info_.width, cam_info_.height), CV_8UC3); + // draw a grid + const cv::Scalar color = cv::Scalar(255, 255, 255); + // draw the lines thick so the proportion of error due to + // interpolation is reduced + const int thickness = 7; + const int type = 8; + for (size_t y = 0; y <= cam_info_.height; y += cam_info_.height/10) + { + cv::line(distorted_image_, + cv::Point(0, y), cv::Point(cam_info_.width, y), + color, type, thickness); + } + for (size_t x = 0; x <= cam_info_.width; x += cam_info_.width/10) + { + // draw the lines thick so the prorportion of interpolation error is reduced + cv::line(distorted_image_, + cv::Point(x, 0), cv::Point(x, cam_info_.height), + color, type, thickness); + } + + raw_image_ = cv_bridge::CvImage(std_msgs::Header(), "bgr8", + distorted_image_).toImageMsg(); + + // Create raw camera subscriber and publisher + image_transport::ImageTransport it(nh_); + cam_pub_ = it.advertiseCamera(topic_raw_, 1); + } + + ros::NodeHandle nh_; + std::string topic_raw_; + std::string topic_mono_; + std::string topic_rect_; + std::string topic_color_; + std::string topic_rect_color_; + + cv::Mat distorted_image_; + sensor_msgs::ImagePtr raw_image_; + bool has_new_image_; + cv::Mat received_image_; + sensor_msgs::CameraInfo cam_info_; + image_transport::CameraPublisher cam_pub_; + image_transport::Subscriber cam_sub_; + +public: + void imageCallback(const sensor_msgs::ImageConstPtr& msg) + { + cv_bridge::CvImageConstPtr cv_ptr; + try + { + cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::BGR8); + } + catch (cv_bridge::Exception& e) + { + ROS_FATAL("cv_bridge exception: %s", e.what()); + return; + } + received_image_ = cv_ptr->image.clone(); + has_new_image_ = true; + } + + void publishRaw() + { + has_new_image_ = false; + cam_pub_.publish(*raw_image_, cam_info_); + } +}; + +TEST_F(ImageProcRectifyTest, rectifyTest) +{ + ROS_INFO("In test. Subscribing."); + image_transport::ImageTransport it(nh_); + cam_sub_ = it.subscribe(topic_rect_, 1, &ImageProcRectifyTest::imageCallback, + dynamic_cast(this)); + // Wait for image_proc to be operational + bool wait_for_topic = true; + while (wait_for_topic) + { + // @todo this fails without the additional 0.5 second sleep after the + // publisher comes online, which means on a slower or more heavily + // loaded system it may take longer than 0.5 seconds, and the test + // would hang until the timeout is reached and fail. 
+ if (cam_sub_.getNumPublishers() > 0) + wait_for_topic = false; + ros::Duration(0.5).sleep(); + } + + // All the tests are the same as from + // vision_opencv/image_geometry/test/utest.cpp + // default cam info + + // Just making this number up, maybe ought to be larger + // since a completely different image would be on the order of + // width * height * 255 = 78e6 + const double diff_threshold = 10000.0; + double error; + + // use original cam_info + publishRaw(); + while (!has_new_image_) + { + ros::spinOnce(); + ros::Duration(0.5).sleep(); + } + // Test that rectified image is sufficiently different + // using default distortion + error = cv::norm(distorted_image_, received_image_, cv::NORM_L1); + // Just making this number up, maybe ought to be larger + EXPECT_GT(error, diff_threshold); + + // Test that rectified image is sufficiently different + // using default distortion but with first element zeroed + // out. + sensor_msgs::CameraInfo cam_info_orig = cam_info_; + cam_info_.D[0] = 0.0; + publishRaw(); + while (!has_new_image_) + { + ros::spinOnce(); + ros::Duration(0.5).sleep(); + } + error = cv::norm(distorted_image_, received_image_, cv::NORM_L1); + EXPECT_GT(error, diff_threshold); + + // Test that rectified image is the same using zero distortion + cam_info_.D.assign(cam_info_.D.size(), 0); + publishRaw(); + while (!has_new_image_) + { + ros::spinOnce(); + ros::Duration(0.5).sleep(); + } + error = cv::norm(distorted_image_, received_image_, cv::NORM_L1); + EXPECT_EQ(error, 0); + + + // Test that rectified image is the same using empty distortion + cam_info_.D.clear(); + publishRaw(); + while (!has_new_image_) + { + ros::spinOnce(); + ros::Duration(0.5).sleep(); + } + error = cv::norm(distorted_image_, received_image_, cv::NORM_L1); + + EXPECT_EQ(error, 0); + + // restore the original cam_info for other tests added in the future + cam_info_ = cam_info_orig; +} + +int main(int argc, char** argv) +{ + ros::init(argc, argv, "image_proc_test_rectify"); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/future pkg/image_pipeline/image_proc/test/test_rectify.xml b/future pkg/image_pipeline/image_proc/test/test_rectify.xml new file mode 100644 index 0000000..f1a451c --- /dev/null +++ b/future pkg/image_pipeline/image_proc/test/test_rectify.xml @@ -0,0 +1,9 @@ + + + + + + + + diff --git a/future pkg/image_pipeline/image_publisher/.DS_Store b/future pkg/image_pipeline/image_publisher/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5546fc1bd0255abc5e805b605b4d4ccb514c4994 GIT binary patch literal 6148 zcmeHK%Wl&^6ur{~a8eZ_fz&R&L1Ga_sY2FnNTf4n6fg?>>k5c(w?ch-MZ<%r@cnLK<<>W8vWOMhrhU3c4{48f z={`LIwkV;LvLchMmY7$;d_W!@(*V3sFyJk;rwiP7CGK^KiBkY(1kb2PBlN~JM4nh? zIi(YdN^kJBlw}z#4^D}+MKZ?RM zYq!6!rAqbEjX<%;TyB%c@zmUSn-K?Q}Mpc4wnKYueMz&34ni-MKTH z)vVQ<>vs+jw3aXE z`9lTSt-{bZ(ZC`?~Hm@UJJISNf{6fg=D6{x6TL!AG+zrX(%NoHge zFbe!v3b5*qyVFHU`fOb&PMozi{17fo#N`Sl1&uzAWk(#vYj9~;vqcB!Xv`I224?;U NNEu9H6!@zOd`_) + * fix 'VideoCapture' undefined symbol error + The following error occured when trying to run image_publisher: + [...]/devel/lib/image_publisher/image_publisher: symbol lookup error: [...]/devel/lib//libimage_publisher.so: undefined symbol: _ZN2cv12VideoCaptureC1Ev + Probably, changes in cv_bridge reducing the OpenCV component dependencies led to the error. 
See + https://github.com/ros-perception/vision_opencv/commit/8b5bbcbc1ce65734dc600695487909e0c67c1033 + This is fixed by manually finding OpenCV with the required components and adding the dependencies to the library, not just the node. + * add image_publisher opencv 2 compatibility +* Contributors: hannometer + +1.12.22 (2017-12-08) +-------------------- + +1.12.21 (2017-11-05) +-------------------- + +1.12.20 (2017-04-30) +-------------------- +* explicitly cast to std::vector to make gcc6 happy + With gcc6, compiling image_publisher fails with this error: + ``` + /[...]/image_publisher/src/nodelet/image_publisher_nodelet.cpp: In member function 'virtual void image_publisher::ImagePublisherNodelet::onInit()': + /[...]/image_publisher/src/nodelet/image_publisher_nodelet.cpp:180:43: error: ambiguous overload for 'operator=' (operand types are 'sensor_msgs::CameraInfo\_ >::_D_type {aka std::vector}' and 'boost::assign_detail::generic_list') + camera_info\_.D = list_of(0)(0)(0)(0)(0); + ``` + After adding an initial explicit type cast for the assignment, + compiling fails further with: + ``` + | /[...]/image_publisher/src/nodelet/image_publisher_nodelet.cpp: In member function 'virtual void image_publisher::ImagePublisherNodelet::onInit()': + | /[...]/image_publisher/src/nodelet/image_publisher_nodelet.cpp:180:65: error: call of overloaded 'vector(boost::assign_detail::generic_list&)' is ambiguous + | camera_info\_.D = std::vector (list_of(0)(0)(0)(0)(0)); + ``` + Various sources on the internet [1, 2, 3] point to use the + `convert_to_container` method; hence, this commit follows those + suggestions and with that image_publisher compiles with gcc6. + [1] http://stackoverflow.com/questions/16211410/ambiguity-when-using-boostassignlist-of-to-construct-a-stdvector + [2] http://stackoverflow.com/questions/12352692/`ambiguous-call-with-list-of-in-vs2010/12362548#12362548 `_ + [3] http://stackoverflow.com/questions/13285272/using-boostassignlist-of?rq=1 + Signed-off-by: Lukas Bulwahn +* address gcc6 build error + With gcc6, compiling fails with `stdlib.h: No such file or directory`, + as including '-isystem /usr/include' breaks with gcc6, cf., + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129. + This commit addresses this issue for this package in the same way + it was addressed in various other ROS packages. 
A list of related + commits and pull requests is at: + https://github.com/ros/rosdistro/issues/12783 + Signed-off-by: Lukas Bulwahn +* Contributors: Lukas Bulwahn + +1.12.19 (2016-07-24) +-------------------- +* add image_publisher +* Contributors: Kei Okada + +* add image_publisher +* Contributors: Kei Okada diff --git a/future pkg/image_pipeline/image_publisher/CMakeLists.txt b/future pkg/image_pipeline/image_publisher/CMakeLists.txt new file mode 100644 index 0000000..8f99fc6 --- /dev/null +++ b/future pkg/image_pipeline/image_publisher/CMakeLists.txt @@ -0,0 +1,39 @@ +cmake_minimum_required(VERSION 2.8) +project(image_publisher) + +find_package(catkin REQUIRED COMPONENTS cv_bridge dynamic_reconfigure camera_info_manager sensor_msgs image_transport nodelet roscpp) + +set(opencv_2_components core highgui) +set(opencv_3_components core imgcodecs videoio) +find_package(OpenCV REQUIRED COMPONENTS core) +message(STATUS "opencv version ${OpenCV_VERSION}") +if(OpenCV_VERSION VERSION_LESS "3.0.0") + find_package(OpenCV 2 REQUIRED COMPONENTS ${opencv_2_components}) +else() + find_package(OpenCV 4 REQUIRED COMPONENTS ${opencv_3_components}) +endif() + +# generate the dynamic_reconfigure config file +generate_dynamic_reconfigure_options(cfg/ImagePublisher.cfg) + +catkin_package() + +include_directories(${catkin_INCLUDE_DIRS}) + +add_library(${PROJECT_NAME} SHARED src/nodelet/image_publisher_nodelet.cpp) +target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBRARIES}) +add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg) +install(TARGETS image_publisher + DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +) + +add_executable(image_publisher_exe src/node/image_publisher.cpp) +SET_TARGET_PROPERTIES(image_publisher_exe PROPERTIES OUTPUT_NAME image_publisher) +target_link_libraries(image_publisher_exe ${catkin_LIBRARIES}) + +install(TARGETS image_publisher_exe + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) +install(FILES nodelet_plugins.xml + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +) diff --git a/future pkg/image_pipeline/image_publisher/cfg/ImagePublisher.cfg b/future pkg/image_pipeline/image_publisher/cfg/ImagePublisher.cfg new file mode 100755 index 0000000..6b17660 --- /dev/null +++ b/future pkg/image_pipeline/image_publisher/cfg/ImagePublisher.cfg @@ -0,0 +1,44 @@ +#! /usr/bin/env python +# Software License Agreement (BSD License) +# +# Copyright (c) 2010, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + + +PACKAGE='image_publisher' + +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() +gen.add("frame_id", str_t, 0, "Frame to use for camera image", "camera") +gen.add("publish_rate", double_t, 0, "Rate to publish image", 10, 0.1, 30) +gen.add("camera_info_url", str_t, 0, "Path to camera_info", "") + +exit(gen.generate(PACKAGE, "image_publisher", "ImagePublisher")) diff --git a/future pkg/image_pipeline/image_publisher/nodelet_plugins.xml b/future pkg/image_pipeline/image_publisher/nodelet_plugins.xml new file mode 100644 index 0000000..8d3a82c --- /dev/null +++ b/future pkg/image_pipeline/image_publisher/nodelet_plugins.xml @@ -0,0 +1,7 @@ + + + + Nodelet to publish sensor_msgs/Image + + + diff --git a/future pkg/image_pipeline/image_publisher/package.xml b/future pkg/image_pipeline/image_publisher/package.xml new file mode 100644 index 0000000..e78875a --- /dev/null +++ b/future pkg/image_pipeline/image_publisher/package.xml @@ -0,0 +1,35 @@ + + image_publisher + 1.12.23 + +

+ <description>
+ Contains a node that publishes an image stream from a single image file
+ or avi motion file.
+ </description>
+ Kei Okada + Vincent Rabaud + BSD + http://ros.org/wiki/image_publisher + + catkin + + cv_bridge + dynamic_reconfigure + camera_info_manager + image_transport + nodelet + roscpp + sensor_msgs + + cv_bridge + dynamic_reconfigure + camera_info_manager + image_transport + nodelet + roscpp + sensor_msgs + + + +
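As context for the gcc6 changelog entry above (the ambiguous `operator=` between `sensor_msgs::CameraInfo::_D_type` and `boost::assign_detail::generic_list`), the following is a minimal standalone C++ sketch, not part of this patch, showing the `convert_to_container` form that the image_publisher nodelet further down adopts; the variable names here are illustrative only.

#include <vector>
#include <boost/assign/list_of.hpp>

int main()
{
  using boost::assign::list_of;

  // Ambiguous overload under gcc6 when assigning directly to a std::vector:
  //   camera_info_.D = list_of(0)(0)(0)(0)(0);
  // Explicitly converting the generic_list resolves the ambiguity:
  std::vector<double> D =
      list_of(0)(0)(0)(0)(0).convert_to_container<std::vector<double> >();

  return D.size() == 5 ? 0 : 1;
}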
diff --git a/future pkg/image_pipeline/image_publisher/src/.DS_Store b/future pkg/image_pipeline/image_publisher/src/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..e1abf6e34137f5e0789ff11ca26c29f58b09a468 GIT binary patch literal 6148 zcmeHK%}T>S5T317w-vDmL65lz9(t(MpMwaY)`K@8q6ZaCOwd5gN}F1=R`MG920n^U z;N$4b?zYhSbCe=8F#FB!%xtn>!tM?Lh(;$|1E>IiL?tYgu=z&FPx?g)cBF_XXpa3v zT?UB?uul`ROg;nj?TV0sf)Jvy@3)P<)ekLtA+v)xNFai2Ec*5L=%Cxawg1LjaGS*K ztX6v$g{k87%&e4US)MPSdDp7zWq#IaH~iKK^$z^(IvxkRhs#6 z8ntwC97Y&&dK{;r>NZp-4cmGi&p4zk$ys3qt>rFy{)ZU)mg7^_WLEd zw6eCde{pmF(0lAZJ?mwIz@J;og2e&6pfN_uo!3eem0qD|kTr;AWCoZ4W?*6%up5+| zo0vD^7hwjNfk|P2_6G-*(6yKw)LREOx&%O^Yor#m_0=EIp#enKVr~#eP=rZEG^xUt z7{XQ#!la|$weh(@lMX^Z8RxN27PdkWw(t;+4hP{HqI) +#include + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "image_publisher", ros::init_options::AnonymousName); + + if (argc <= 1) { + ROS_ERROR("image_publisher requires filename. Typical command-line usage:\n" + "\t$ rosrun image_publisher image_publisher "); + return 1; + } + + ros::param::set("~filename", argv[1]); + nodelet::Loader manager(false); + nodelet::M_string remappings; + nodelet::V_string my_argv(argv + 1, argv + argc); + my_argv.push_back("--shutdown-on-close"); // Internal + + manager.load(ros::this_node::getName(), "image_publisher/image_publisher", remappings, my_argv); + + ros::spin(); + return 0; +} diff --git a/future pkg/image_pipeline/image_publisher/src/nodelet/image_publisher_nodelet.cpp b/future pkg/image_pipeline/image_publisher/src/nodelet/image_publisher_nodelet.cpp new file mode 100644 index 0000000..5dd2403 --- /dev/null +++ b/future pkg/image_pipeline/image_publisher/src/nodelet/image_publisher_nodelet.cpp @@ -0,0 +1,196 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2014, JSK Lab. +* 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +using namespace boost::assign; +#define CV_LOAD_IMAGE_COLOR cv::IMREAD_COLOR +#define CV_LOAD_IMAGE_GRAYSCALE cv::IMREAD_GRAYSCALE + +namespace image_publisher { +class ImagePublisherNodelet : public nodelet::Nodelet +{ + dynamic_reconfigure::Server srv; + + image_transport::CameraPublisher pub_; + + boost::shared_ptr it_; + ros::NodeHandle nh_; + + cv::VideoCapture cap_; + cv::Mat image_; + int subscriber_count_; + ros::Timer timer_; + + std::string frame_id_; + std::string filename_; + bool flip_image_; + int flip_value_; + sensor_msgs::CameraInfo camera_info_; + + + void reconfigureCallback(image_publisher::ImagePublisherConfig &new_config, uint32_t level) + { + frame_id_ = new_config.frame_id; + + timer_ = nh_.createTimer(ros::Duration(1.0/new_config.publish_rate), &ImagePublisherNodelet::do_work, this); + + camera_info_manager::CameraInfoManager c(nh_); + if ( !new_config.camera_info_url.empty() ) { + try { + c.validateURL(new_config.camera_info_url); + c.loadCameraInfo(new_config.camera_info_url); + camera_info_ = c.getCameraInfo(); + } catch(cv::Exception &e) { + NODELET_ERROR("camera calibration failed to load: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line); + } + } + + + } + + void do_work(const ros::TimerEvent& event) + { + // Transform the image. + try + { + if ( cap_.isOpened() ) { + if ( ! cap_.read(image_) ) { + cap_.set(cv::CAP_PROP_POS_FRAMES, 0); + } + } + if (flip_image_) + cv::flip(image_, image_, flip_value_); + + sensor_msgs::ImagePtr out_img = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image_).toImageMsg(); + out_img->header.frame_id = frame_id_; + out_img->header.stamp = ros::Time::now(); + camera_info_.header.frame_id = out_img->header.frame_id; + camera_info_.header.stamp = out_img->header.stamp; + + pub_.publish(*out_img, camera_info_); + } + catch (cv::Exception &e) + { + NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line); + } + } + + void connectCb(const image_transport::SingleSubscriberPublisher& ssp) + { + subscriber_count_++; + } + + void disconnectCb(const image_transport::SingleSubscriberPublisher&) + { + subscriber_count_--; + } + +public: + virtual void onInit() + { + subscriber_count_ = 0; + nh_ = getPrivateNodeHandle(); + it_ = boost::shared_ptr(new image_transport::ImageTransport(nh_)); + pub_ = image_transport::ImageTransport(nh_).advertiseCamera("image_raw", 1); + + nh_.param("filename", filename_, std::string("")); + NODELET_INFO("File name for publishing image is : %s", filename_.c_str()); + try { + image_ = cv::imread(filename_, CV_LOAD_IMAGE_COLOR); + if ( image_.empty() ) { // if filename is motion file or device file + try { // if filename is number + int num = boost::lexical_cast(filename_);//num is 1234798797 + cap_.open(num); + } catch(boost::bad_lexical_cast &) { // if file name is string + cap_.open(filename_); + } + CV_Assert(cap_.isOpened()); + cap_.read(image_); + cap_.set(cv::CAP_PROP_POS_FRAMES, 0); + } + CV_Assert(!image_.empty()); + } + catch (cv::Exception &e) + { + NODELET_ERROR("Failed to load image (%s): %s %s %s %i", filename_.c_str(), e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line); + } + + bool flip_horizontal; + nh_.param("flip_horizontal", flip_horizontal, false); + NODELET_INFO("Flip horizontal image is : %s", ((flip_horizontal)?"true":"false")); + + bool 
flip_vertical; + nh_.param("flip_vertical", flip_vertical, false); + NODELET_INFO("Flip flip_vertical image is : %s", ((flip_vertical)?"true":"false")); + + // From http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#void flip(InputArray src, OutputArray dst, int flipCode) + // FLIP_HORIZONTAL == 1, FLIP_VERTICAL == 0 or FLIP_BOTH == -1 + flip_image_ = true; + if (flip_horizontal && flip_vertical) + flip_value_ = 0; // flip both, horizontal and vertical + else if (flip_horizontal) + flip_value_ = 1; + else if (flip_vertical) + flip_value_ = -1; + else + flip_image_ = false; + + camera_info_.width = image_.cols; + camera_info_.height = image_.rows; + camera_info_.distortion_model = "plumb_bob"; + camera_info_.D = list_of(0)(0)(0)(0)(0).convert_to_container >(); + camera_info_.K = list_of(1)(0)(camera_info_.width/2)(0)(1)(camera_info_.height/2)(0)(0)(1); + camera_info_.R = list_of(1)(0)(0)(0)(1)(0)(0)(0)(1); + camera_info_.P = list_of(1)(0)(camera_info_.width/2)(0)(0)(1)(camera_info_.height/2)(0)(0)(0)(1)(0); + + timer_ = nh_.createTimer(ros::Duration(1), &ImagePublisherNodelet::do_work, this); + + dynamic_reconfigure::Server::CallbackType f = + boost::bind(&ImagePublisherNodelet::reconfigureCallback, this, _1, _2); + srv.setCallback(f); + } +}; +} +#include +PLUGINLIB_EXPORT_CLASS(image_publisher::ImagePublisherNodelet, nodelet::Nodelet); diff --git a/future pkg/image_pipeline/image_rotate/.tar b/future pkg/image_pipeline/image_rotate/.tar new file mode 100644 index 0000000..5beb888 --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/.tar @@ -0,0 +1,2 @@ +{!!python/unicode 'url': 'https://github.com/ros-gbp/image_pipeline-release/archive/release/kinetic/image_rotate/1.12.23-0.tar.gz', + !!python/unicode 'version': image_pipeline-release-release-kinetic-image_rotate-1.12.23-0} diff --git a/future pkg/image_pipeline/image_rotate/CHANGELOG.rst b/future pkg/image_pipeline/image_rotate/CHANGELOG.rst new file mode 100644 index 0000000..228c9ed --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/CHANGELOG.rst @@ -0,0 +1,93 @@ +1.12.23 (2018-05-10) +-------------------- + +1.12.22 (2017-12-08) +-------------------- + +1.12.21 (2017-11-05) +-------------------- +* [image_rotate] Added TF timeout so that transforms only need to be newer than last frame. (`#293 `_) +* Contributors: mhosmar-cpr + +1.12.20 (2017-04-30) +-------------------- +* Fix CMake warnings about Eigen. +* address gcc6 build error + With gcc6, compiling fails with `stdlib.h: No such file or directory`, + as including '-isystem /usr/include' breaks with gcc6, cf., + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129. + This commit addresses this issue for this package in the same way + it was addressed in various other ROS packages. 
A list of related + commits and pull requests is at: + https://github.com/ros/rosdistro/issues/12783 + Signed-off-by: Lukas Bulwahn +* Contributors: Lukas Bulwahn, Vincent Rabaud + +1.12.19 (2016-07-24) +-------------------- +* Fix frames if it is empty to rotate image +* Contributors: Kentaro Wada + +1.12.18 (2016-07-12) +-------------------- + +1.12.17 (2016-07-11) +-------------------- + +1.12.16 (2016-03-19) +-------------------- +* clean OpenCV dependency in package.xml +* Contributors: Vincent Rabaud + +1.12.15 (2016-01-17) +-------------------- + +1.12.14 (2015-07-22) +-------------------- + +1.12.13 (2015-04-06) +-------------------- + +1.12.12 (2014-12-31) +-------------------- + +1.12.11 (2014-10-26) +-------------------- + +1.12.10 (2014-09-28) +-------------------- + +1.12.9 (2014-09-21) +------------------- + +1.12.8 (2014-08-19) +------------------- + +1.12.6 (2014-07-27) +------------------- + +1.12.4 (2014-04-28) +------------------- + +1.12.3 (2014-04-12) +------------------- + +1.12.2 (2014-04-08) +------------------- +* use NODELET_** macros instead of ROS_** macros +* use getNodeHandle rather than getPrivateNodeHandle +* add executable to load image_rotate/image_rotate nodelet. + add xml file to export nodelet definition. + Conflicts: + image_rotate/package.xml +* make image_rotate nodelet class + Conflicts: + image_rotate/CMakeLists.txt + image_rotate/package.xml + image_rotate/src/nodelet/image_rotate_nodelet.cpp +* move image_rotate.cpp to nodelet directory according to the directory convenstion of image_pipeline +* Contributors: Ryohei Ueda + +1.12.1 (2014-04-06) +------------------- +* replace tf usage by tf2 usage diff --git a/future pkg/image_pipeline/image_rotate/CMakeLists.txt b/future pkg/image_pipeline/image_rotate/CMakeLists.txt new file mode 100644 index 0000000..147b1db --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/CMakeLists.txt @@ -0,0 +1,32 @@ +cmake_minimum_required(VERSION 2.8) +project(image_rotate) + +find_package(catkin REQUIRED COMPONENTS cmake_modules cv_bridge dynamic_reconfigure image_transport nodelet roscpp tf2 tf2_geometry_msgs) + +# generate the dynamic_reconfigure config file +generate_dynamic_reconfigure_options(cfg/ImageRotate.cfg) + +catkin_package() + +find_package(OpenCV REQUIRED core imgproc) + +# add the executable +include_directories(${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS}) + +add_library(${PROJECT_NAME} SHARED src/nodelet/image_rotate_nodelet.cpp) +target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBRARIES}) +add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg) +install(TARGETS image_rotate + DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +) + +add_executable(image_rotate_exe src/node/image_rotate.cpp) +SET_TARGET_PROPERTIES(image_rotate_exe PROPERTIES OUTPUT_NAME image_rotate) +target_link_libraries(image_rotate_exe ${catkin_LIBRARIES} ${OpenCV_LIBRARIES}) + +install(TARGETS image_rotate_exe + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) +install(FILES nodelet_plugins.xml + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +) diff --git a/future pkg/image_pipeline/image_rotate/cfg/ImageRotate.cfg b/future pkg/image_pipeline/image_rotate/cfg/ImageRotate.cfg new file mode 100755 index 0000000..892b231 --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/cfg/ImageRotate.cfg @@ -0,0 +1,59 @@ +#! /usr/bin/env python +# Software License Agreement (BSD License) +# +# Copyright (c) 2010, Willow Garage, Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + + +PACKAGE='image_rotate' + +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() +gen.add("target_frame_id", str_t, 0, "Frame in which the target vector is specified. Empty means the input frame.", "base_link") +gen.add("target_x", double_t, 0, "X coordinate of the target vector", 0, -10, 10) +gen.add("target_y", double_t, 0, "Y coordinate of the target vector", 0, -10, 10) +gen.add("target_z", double_t, 0, "Z coordinate of the target vector", 1, -10, 10) + +gen.add("source_frame_id", str_t, 0, "Frame in which the source vector is specified. Empty means the input frame.", "") +gen.add("source_x", double_t, 0, "X coordinate of the direction the target should be aligned with.", 0, -10, 10) +gen.add("source_y", double_t, 0, "Y coordinate of the direction the target should be aligned with.", -1, -10, 10) +gen.add("source_z", double_t, 0, "Z coordinate of the direction the target should be aligned with.", 0, -10, 10) + +gen.add("output_frame_id", str_t, 0, "Frame to publish for the image's new orientation. Empty means add '_rotated' suffix to the image frame.", "") +gen.add("input_frame_id", str_t, 0, "Frame to use for the original camera image. Empty means that the frame in the image or camera_info should be used depending on use_camera_info.", "") + +gen.add("use_camera_info", bool_t, 0, "Indicates that the camera_info topic should be subscribed to to get the default input_frame_id. Otherwise the frame from the image message will be used.", True) + +gen.add("max_angular_rate", double_t, 0, "Limits the rate at which the image can rotate (rad/s). Zero means no limit.", 10, 0, 100) + +gen.add("output_image_size", double_t, 0, "Size of the output image as a function of the input image size. 
Can be varied continuously between the following special settings: 0 ensures no black ever appears, 1 is small image dimension, 2 is large image dimension, 3 is image diagonal.", 2, 0, 3) + +exit(gen.generate(PACKAGE, "image_rotate", "ImageRotate")) diff --git a/future pkg/image_pipeline/image_rotate/mainpage.dox b/future pkg/image_pipeline/image_rotate/mainpage.dox new file mode 100644 index 0000000..43b15c9 --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/mainpage.dox @@ -0,0 +1,26 @@ +/** +\mainpage +\htmlinclude manifest.html + +\b image_rotate is ... + + + + +\section codeapi Code API + + + + +*/ diff --git a/future pkg/image_pipeline/image_rotate/nodelet_plugins.xml b/future pkg/image_pipeline/image_rotate/nodelet_plugins.xml new file mode 100644 index 0000000..862f136 --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/nodelet_plugins.xml @@ -0,0 +1,7 @@ + + + + Nodelet to rotate sensor_msgs/Image + + + diff --git a/future pkg/image_pipeline/image_rotate/package.xml b/future pkg/image_pipeline/image_rotate/package.xml new file mode 100644 index 0000000..a7fbb74 --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/package.xml @@ -0,0 +1,55 @@ + + image_rotate + 1.12.23 + +

+ <description>
+ <p>
+ Contains a node that rotates an image stream in a way that minimizes
+ the angle between a vector in some arbitrary frame and a vector in the
+ camera frame. The frame of the outgoing image is published by the node.
+ </p>
+ <p>
+ This node is intended to allow camera images to be visualized in an
+ orientation that is more intuitive than the hardware-constrained
+ orientation of the physical camera. This is particularly helpful, for
+ example, to show images from the PR2's forearm cameras with a
+ consistent up direction, despite the fact that the forearms need to
+ rotate in arbitrary ways during manipulation.
+ </p>
+ <p>
+ It is not recommended to use the output from this node for further
+ computation, as it interpolates the source image, introduces black
+ borders, and does not output a camera_info.
+ </p>
+ </description>
+ Blaise Gassend + Vincent Rabaud + BSD + http://ros.org/wiki/image_rotate + + catkin + + rostest + + cmake_modules + cv_bridge + dynamic_reconfigure + geometry_msgs + image_transport + nodelet + roscpp + tf2 + tf2_geometry_msgs + tf2_ros + + cv_bridge + dynamic_reconfigure + image_transport + nodelet + roscpp + tf2 + tf2_geometry_msgs + tf2_ros + + + +
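The manifest above describes image_rotate's behaviour in prose; the nodelet that implements it appears later in this patch (see the do_work() method of image_rotate_nodelet.cpp). As a distilled illustration only, not part of the patch, the per-frame rotation reduces to the sketch below; it assumes the target and source vectors have already been transformed into the camera frame, uses the "diagonal" output_image_size setting so nothing is cropped, and rotateToAlign is a hypothetical helper name.

#include <cmath>
#include <opencv2/imgproc.hpp>

// Rotate 'in' so that the source direction lines up with the target direction.
cv::Mat rotateToAlign(const cv::Mat& in,
                      double target_x, double target_y,  // target vector (camera frame)
                      double source_x, double source_y)  // source vector (camera frame)
{
  // Angle that maps the source direction onto the target direction.
  double angle = std::atan2(target_y, target_x) - std::atan2(source_y, source_x);

  // Output canvas is the image diagonal, so no pixels are lost.
  int out_size = static_cast<int>(
      std::sqrt(double(in.cols) * in.cols + double(in.rows) * in.rows));

  cv::Mat rot = cv::getRotationMatrix2D(
      cv::Point2f(in.cols / 2.0f, in.rows / 2.0f), 180.0 * angle / CV_PI, 1.0);
  // Re-centre the rotated image on the larger output canvas.
  rot.at<double>(0, 2) += (out_size - in.cols) / 2.0;
  rot.at<double>(1, 2) += (out_size - in.rows) / 2.0;

  cv::Mat out;
  cv::warpAffine(in, out, rot, cv::Size(out_size, out_size));
  return out;
}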
diff --git a/future pkg/image_pipeline/image_rotate/src/node/image_rotate.cpp b/future pkg/image_pipeline/image_rotate/src/node/image_rotate.cpp new file mode 100644 index 0000000..d7e573b --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/src/node/image_rotate.cpp @@ -0,0 +1,54 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#include + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "image_rotate", ros::init_options::AnonymousName); + if (ros::names::remap("image") == "image") { + ROS_WARN("Topic 'image' has not been remapped! Typical command-line usage:\n" + "\t$ rosrun image_rotate image_rotate image:= [transport]"); + } + + nodelet::Loader manager(false); + nodelet::M_string remappings; + nodelet::V_string my_argv(argv + 1, argv + argc); + my_argv.push_back("--shutdown-on-close"); // Internal + + manager.load(ros::this_node::getName(), "image_rotate/image_rotate", remappings, my_argv); + + ros::spin(); + return 0; +} diff --git a/future pkg/image_pipeline/image_rotate/src/nodelet/image_rotate_nodelet.cpp b/future pkg/image_pipeline/image_rotate/src/nodelet/image_rotate_nodelet.cpp new file mode 100644 index 0000000..066ef4e --- /dev/null +++ b/future pkg/image_pipeline/image_rotate/src/nodelet/image_rotate_nodelet.cpp @@ -0,0 +1,281 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2014, JSK Lab. +* 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ + +/******************************************************************** +* image_rotate_nodelet.cpp +* this is a forked version of image_rotate. +* this image_rotate_nodelet supports: +* 1) nodelet +* 2) tf and tf2 +*********************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace image_rotate { +class ImageRotateNodelet : public nodelet::Nodelet +{ + tf2_ros::Buffer tf_buffer_; + boost::shared_ptr tf_sub_; + tf2_ros::TransformBroadcaster tf_pub_; + + image_rotate::ImageRotateConfig config_; + dynamic_reconfigure::Server srv; + + image_transport::Publisher img_pub_; + image_transport::Subscriber img_sub_; + image_transport::CameraSubscriber cam_sub_; + + geometry_msgs::Vector3Stamped target_vector_; + geometry_msgs::Vector3Stamped source_vector_; + + boost::shared_ptr it_; + ros::NodeHandle nh_; + + int subscriber_count_; + double angle_; + ros::Time prev_stamp_; + + void reconfigureCallback(image_rotate::ImageRotateConfig &new_config, uint32_t level) + { + config_ = new_config; + target_vector_.vector.x = config_.target_x; + target_vector_.vector.y = config_.target_y; + target_vector_.vector.z = config_.target_z; + + source_vector_.vector.x = config_.source_x; + source_vector_.vector.y = config_.source_y; + source_vector_.vector.z = config_.source_z; + if (subscriber_count_) + { // @todo Could do this without an interruption at some point. 
+ unsubscribe(); + subscribe(); + } + } + + const std::string &frameWithDefault(const std::string &frame, const std::string &image_frame) + { + if (frame.empty()) + return image_frame; + return frame; + } + + void imageCallbackWithInfo(const sensor_msgs::ImageConstPtr& msg, const sensor_msgs::CameraInfoConstPtr& cam_info) + { + do_work(msg, cam_info->header.frame_id); + } + + void imageCallback(const sensor_msgs::ImageConstPtr& msg) + { + do_work(msg, msg->header.frame_id); + } + + void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg) + { + try + { + std::string input_frame_id = frameWithDefault(config_.input_frame_id, input_frame_from_msg); + std::string target_frame_id = frameWithDefault(config_.target_frame_id, input_frame_from_msg); + std::string source_frame_id = frameWithDefault(config_.source_frame_id, input_frame_from_msg); + + // Transform the target vector into the image frame. + target_vector_.header.stamp = msg->header.stamp; + target_vector_.header.frame_id = target_frame_id; + geometry_msgs::Vector3Stamped target_vector_transformed; + geometry_msgs::TransformStamped transform = tf_buffer_.lookupTransform(target_frame_id, input_frame_id, msg->header.stamp, msg->header.stamp-prev_stamp_); + tf2::doTransform(target_vector_, target_vector_transformed, transform); + + // Transform the source vector into the image frame. + source_vector_.header.stamp = msg->header.stamp; + source_vector_.header.frame_id = source_frame_id; + geometry_msgs::Vector3Stamped source_vector_transformed; + transform = tf_buffer_.lookupTransform(source_frame_id, input_frame_id, msg->header.stamp, msg->header.stamp-prev_stamp_); + tf2::doTransform(source_vector_, source_vector_transformed, transform); + + //NODELET_INFO("target: %f %f %f", target_vector_.x(), target_vector_.y(), target_vector_.z()); + //NODELET_INFO("target_transformed: %f %f %f", target_vector_transformed.x(), target_vector_transformed.y(), target_vector_transformed.z()); + //NODELET_INFO("source: %f %f %f", source_vector_.x(), source_vector_.y(), source_vector_.z()); + //NODELET_INFO("source_transformed: %f %f %f", source_vector_transformed.x(), source_vector_transformed.y(), source_vector_transformed.z()); + + // Calculate the angle of the rotation. + double angle = angle_; + if ((target_vector_transformed.vector.x != 0 || target_vector_transformed.vector.y != 0) && + (source_vector_transformed.vector.x != 0 || source_vector_transformed.vector.y != 0)) + { + angle = atan2(target_vector_transformed.vector.y, target_vector_transformed.vector.x); + angle -= atan2(source_vector_transformed.vector.y, source_vector_transformed.vector.x); + } + + // Rate limit the rotation. + if (config_.max_angular_rate == 0) + angle_ = angle; + else + { + double delta = fmod(angle - angle_, 2.0 * M_PI); + if (delta > M_PI) + delta -= 2.0 * M_PI; + else if (delta < - M_PI) + delta += 2.0 * M_PI; + + double max_delta = config_.max_angular_rate * (msg->header.stamp - prev_stamp_).toSec(); + if (delta > max_delta) + delta = max_delta; + else if (delta < -max_delta) + delta = - max_delta; + + angle_ += delta; + } + angle_ = fmod(angle_, 2.0 * M_PI); + } + catch (tf2::TransformException &e) + { + NODELET_ERROR("Transform error: %s", e.what()); + } + + //NODELET_INFO("angle: %f", 180 * angle_ / M_PI); + + // Publish the transform. 
+ geometry_msgs::TransformStamped transform; + transform.transform.translation.x = 0; + transform.transform.translation.y = 0; + transform.transform.translation.z = 0; + tf2::convert(tf2::Quaternion(tf2::Vector3(0.0, 0.0, 1.0), angle_), transform.transform.rotation); + transform.header.frame_id = msg->header.frame_id; + transform.child_frame_id = frameWithDefault(config_.output_frame_id, msg->header.frame_id + "_rotated"); + transform.header.stamp = msg->header.stamp; + tf_pub_.sendTransform(transform); + + // Transform the image. + try + { + // Convert the image into something opencv can handle. + cv::Mat in_image = cv_bridge::toCvShare(msg, msg->encoding)->image; + + // Compute the output image size. + int max_dim = in_image.cols > in_image.rows ? in_image.cols : in_image.rows; + int min_dim = in_image.cols < in_image.rows ? in_image.cols : in_image.rows; + int noblack_dim = min_dim / sqrt(2); + int diag_dim = sqrt(in_image.cols*in_image.cols + in_image.rows*in_image.rows); + int out_size; + int candidates[] = { noblack_dim, min_dim, max_dim, diag_dim, diag_dim }; // diag_dim repeated to simplify limit case. + int step = config_.output_image_size; + out_size = candidates[step] + (candidates[step + 1] - candidates[step]) * (config_.output_image_size - step); + //NODELET_INFO("out_size: %d", out_size); + + // Compute the rotation matrix. + cv::Mat rot_matrix = cv::getRotationMatrix2D(cv::Point2f(in_image.cols / 2.0, in_image.rows / 2.0), 180 * angle_ / M_PI, 1); + cv::Mat translation = rot_matrix.col(2); + rot_matrix.at(0, 2) += (out_size - in_image.cols) / 2.0; + rot_matrix.at(1, 2) += (out_size - in_image.rows) / 2.0; + + // Do the rotation + cv::Mat out_image; + cv::warpAffine(in_image, out_image, rot_matrix, cv::Size(out_size, out_size)); + + // Publish the image. 
+ sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding, out_image).toImageMsg(); + out_img->header.frame_id = transform.child_frame_id; + img_pub_.publish(out_img); + } + catch (cv::Exception &e) + { + NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line); + } + + prev_stamp_ = msg->header.stamp; + } + + void subscribe() + { + NODELET_DEBUG("Subscribing to image topic."); + if (config_.use_camera_info && config_.input_frame_id.empty()) + cam_sub_ = it_->subscribeCamera("image", 3, &ImageRotateNodelet::imageCallbackWithInfo, this); + else + img_sub_ = it_->subscribe("image", 3, &ImageRotateNodelet::imageCallback, this); + } + + void unsubscribe() + { + NODELET_DEBUG("Unsubscribing from image topic."); + img_sub_.shutdown(); + cam_sub_.shutdown(); + } + + void connectCb(const image_transport::SingleSubscriberPublisher& ssp) + { + if (subscriber_count_++ == 0) { + subscribe(); + } + } + + void disconnectCb(const image_transport::SingleSubscriberPublisher&) + { + subscriber_count_--; + if (subscriber_count_ == 0) { + unsubscribe(); + } + } + +public: + virtual void onInit() + { + nh_ = getNodeHandle(); + it_ = boost::shared_ptr(new image_transport::ImageTransport(nh_)); + subscriber_count_ = 0; + angle_ = 0; + prev_stamp_ = ros::Time(0, 0); + tf_sub_.reset(new tf2_ros::TransformListener(tf_buffer_)); + image_transport::SubscriberStatusCallback connect_cb = boost::bind(&ImageRotateNodelet::connectCb, this, _1); + image_transport::SubscriberStatusCallback disconnect_cb = boost::bind(&ImageRotateNodelet::disconnectCb, this, _1); + img_pub_ = image_transport::ImageTransport(ros::NodeHandle(nh_, "rotated")).advertise("image", 1, connect_cb, disconnect_cb); + + dynamic_reconfigure::Server::CallbackType f = + boost::bind(&ImageRotateNodelet::reconfigureCallback, this, _1, _2); + srv.setCallback(f); + } +}; +} +#include +PLUGINLIB_EXPORT_CLASS(image_rotate::ImageRotateNodelet, nodelet::Nodelet); diff --git a/future pkg/image_pipeline/image_view/.tar b/future pkg/image_pipeline/image_view/.tar new file mode 100644 index 0000000..22dee0b --- /dev/null +++ b/future pkg/image_pipeline/image_view/.tar @@ -0,0 +1,2 @@ +{!!python/unicode 'url': 'https://github.com/ros-gbp/image_pipeline-release/archive/release/kinetic/image_view/1.12.23-0.tar.gz', + !!python/unicode 'version': image_pipeline-release-release-kinetic-image_view-1.12.23-0} diff --git a/future pkg/image_pipeline/image_view/CHANGELOG.rst b/future pkg/image_pipeline/image_view/CHANGELOG.rst new file mode 100644 index 0000000..f5756fa --- /dev/null +++ b/future pkg/image_pipeline/image_view/CHANGELOG.rst @@ -0,0 +1,188 @@ +1.12.23 (2018-05-10) +-------------------- + +1.12.22 (2017-12-08) +-------------------- + +1.12.21 (2017-11-05) +-------------------- +* call namedWindow from same thread as imshow, need waitKay, now cvStartWindowThreads is null funciton on window_QT.h (`#279 `_) +* Contributors: Kei Okada + +1.12.20 (2017-04-30) +-------------------- +* DisparityViewNodelet: fixed freeze (`#244 `_) +* launch image view with a predefined window size (`#257 `_) +* Remove python-opencv run_depend for image_view (`#270 `_) + The `python-opencv` dependency pulls in the system OpenCV v2.4 which is + not required since the `image_view` package depends on `cv_bridge` which + pulls in `opencv3` and `opencv3` provides the python library that + `image_view` can use. 
+* Fix encoding error message (`#253 `_) + * Fix encoding error message + * Update image_saver.cpp + Allow compilation on older compilers +* Including stereo_msgs dep fixes `#248 `_ (`#249 `_) +* Add no gui mode to just visualize & publish with image_view (`#241 `_) +* stere_view: fixed empty left, right, disparity windows with opencv3 +* Apply value scaling to depth/float image with min/max image value + If min/max image value is specified we just use it, and if not, + - 32FC1: we assume depth image with meter metric, and 10[m] as the max range. + - 16UC1: we assume depth image with milimeter metric, and 10 * 1000[mm] as the max range. +* Depends on cv_bridge 1.11.13 for CvtColorForDisplayOptions + Close `#238 `_ +* fix doc jobs + This is a proper fix for `#233 `_ +* address gcc6 build error + With gcc6, compiling fails with `stdlib.h: No such file or directory`, + as including '-isystem /usr/include' breaks with gcc6, cf., + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129. + This commit addresses this issue for this package in the same way + it was addressed in various other ROS packages. A list of related + commits and pull requests is at: + https://github.com/ros/rosdistro/issues/12783 + Signed-off-by: Lukas Bulwahn +* Contributors: Christopher Wecht, Kartik Mohta, Kei Okada, Kentaro Wada, Lukas Bulwahn, Leonard Gerard, Vincent Rabaud, cwecht, mryellow + +1.12.19 (2016-07-24) +-------------------- +* Add colormap option in video_recorder +* Merge pull request `#203 `_ from wkentaro/video-recorder-timestamp + [image_view] Stamped video output filename for video recorder +* bump version requirement for cv_bridge dep + Closes `#215 `_ +* Request for saving image with start/end two triggers +* Stamped video output filename + - _filename:=output.avi _stamped_filename:=false -> output.avi + - _filename:=_out.avi _stamped_filename:=true -> 1466299931.584632829_out.avi + - _filename:=$HOME/.ros/.avi _stamped_filename:=true -> /home/ubuntu/.ros/1466299931.584632829.avi +* Revert max_depth_range to default value for cvtColorForDisplay +* Contributors: Kentaro Wada, Vincent Rabaud + +1.12.18 (2016-07-12) +-------------------- +* Use image_transport::Subscriber aside from ros::Subscriber +* Refactor: Remove subscription of camera_info in video_recorder +* Add colormap options for displaying image topic +* Use CvtColorForDisplayOptions for cvtColorForDisplay +* Contributors: Kentaro Wada, Vincent Rabaud + +1.12.17 (2016-07-11) +-------------------- +* Fix timestamp to get correct fps in video_recorder +* Get correct fps in video_recorder.cpp +* Do dynamic scaling for float images +* Contributors: Kentaro Wada + +1.12.16 (2016-03-19) +-------------------- +* Remove code for roslib on .cfg files + Closes `#185 `_ +* add cv::waitKey for opencv3 installed from source to fix freezing issue +* when no image is saved, do not save camera info + When the images are not recorded because "save_all_image" is false and "save_image_service" is false, the frame count should not be incremented and the camera info should not be written to disk. 
+* Add std_srvs to catkin find_package() +* Contributors: Jeremy Kerfs, Jochen Sprickerhof, Kentaro Wada, Krishneel + +1.12.15 (2016-01-17) +-------------------- +* simplify the OpenCV dependency +* [image_view] Configure do_dynamic_scaling param with dynamic_reconfigure +* [image_view] Scale 16UC1 depth image +* fix compilation +* Extract images which are synchronized with message_filters +* [image_view] Show full path when failed to save image +* [image_view] Enable to specify transport with arg +* [image_view] feedback: no need threading for callback +* [image_view/image_view] Make as a node +* Added sensor_msgs::Image conversion to cv::Mat from rqt_image_view in + order to be able to create videos from kinect depth images (cv_bridge + currently doesn't support 16UC1 image encoding). + Code adapted from: + https://github.com/ros-visualization/rqt_common_plugins/blob/groovy-devel/rqt_image_view/src/rqt_image_view/image_view.cpp +* simplify OpenCV3 conversion +* use the color conversion for display from cv_bridge +* Contributors: Carlos Costa, Kentaro Wada, Vincent Rabaud + +1.12.14 (2015-07-22) +-------------------- +* reduce the differences between OpenCV2 and 3 +* do not build GUIs on Android + This fixes `#137 `_ +* Contributors: Vincent Rabaud + +1.12.13 (2015-04-06) +-------------------- + +1.12.12 (2014-12-31) +-------------------- +* Convert function to inline to avoid duplicates with image_transport +* Revert "remove GTK dependency" + This reverts commit a6e15e796a40385fbbf8da05966aa47d179dcb46. + Conflicts: + image_view/CMakeLists.txt + image_view/src/nodelets/disparity_nodelet.cpp + image_view/src/nodes/stereo_view.cpp +* Revert "make sure waitKey is called after imshow" + This reverts commit d13e3ed6af819459bca221ece779964a74beefac. +* Revert "brings back window_thread" + This reverts commit 41a655e8e99910c13a3e7f1ebfdd083207cef76f. +* Contributors: Gary Servin, Vincent Rabaud + +1.12.11 (2014-10-26) +-------------------- +* brings back window_thread + This fixes `#102 `_ fully +* small optimizations +* add the image_transport parameter +* Contributors: Vincent Rabaud + +1.12.10 (2014-09-28) +-------------------- + +1.12.9 (2014-09-21) +------------------- +* get code to compile with OpenCV3 + fixes `#96 `_ +* Contributors: Vincent Rabaud + +1.12.8 (2014-08-19) +------------------- + +1.12.6 (2014-07-27) +------------------- +* make sure waitKey is called after imshow +* remove GTK dependency +* small speedups +* Contributors: Vincent Rabaud + +1.12.5 (2014-05-11) +------------------- +* image_view: Add depend on gtk2 +* Contributors: Scott K Logan + +1.12.4 (2014-04-28) +------------------- +* fixes `#65 `_ +* Contributors: Vincent Rabaud + +1.12.3 (2014-04-12) +------------------- + +1.12.2 (2014-04-08) +------------------- + +1.12.1 (2014-04-06) +------------------- +* get proper opencv dependency +* Contributors: Vincent Rabaud + +1.11.7 (2014-03-28) +------------------- +* Added requirement for core. 
+* Contributors: Jonathan J Hunt + +1.11.3 (2013-10-06 20:21:55 +0100) +---------------------------------- +- #41: allow image_saver to save image topics +- #40: use proper download URL diff --git a/future pkg/image_pipeline/image_view/CMakeLists.txt b/future pkg/image_pipeline/image_view/CMakeLists.txt new file mode 100644 index 0000000..0b98148 --- /dev/null +++ b/future pkg/image_pipeline/image_view/CMakeLists.txt @@ -0,0 +1,90 @@ +cmake_minimum_required(VERSION 2.8) +project(image_view) + +find_package(catkin REQUIRED COMPONENTS camera_calibration_parsers cv_bridge dynamic_reconfigure image_transport message_filters message_generation nodelet rosconsole roscpp std_srvs stereo_msgs) +generate_dynamic_reconfigure_options(cfg/ImageView.cfg) + +catkin_package(CATKIN_DEPENDS dynamic_reconfigure) +find_package(Boost REQUIRED COMPONENTS signals thread) +find_package(OpenCV REQUIRED) + +include_directories(${Boost_INCLUDE_DIRS} + ${catkin_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} +) + +# Extra tools +add_executable(extract_images src/nodes/extract_images.cpp) +target_link_libraries(extract_images ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} +) + +add_executable(image_saver src/nodes/image_saver.cpp) +target_link_libraries(image_saver ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} +) + +add_executable(video_recorder src/nodes/video_recorder.cpp) +target_link_libraries(video_recorder ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} +) + +install(TARGETS extract_images image_saver video_recorder + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) + +# Deal with the GUI's +if(ANDROID) + return() +endif() + +find_package(GTK2) +add_definitions(-DHAVE_GTK) +include_directories(${GTK2_INCLUDE_DIRS}) + +# Nodelet library +add_library(image_view src/nodelets/image_nodelet.cpp src/nodelets/disparity_nodelet.cpp src/nodelets/window_thread.cpp) +target_link_libraries(image_view ${catkin_LIBRARIES} + ${GTK_LIBRARIES} + ${GTK2_LIBRARIES} + ${OpenCV_LIBRARIES} + ${Boost_LIBRARIES} +) +install(TARGETS image_view + DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +) + +# Image viewers +add_executable(image_view_exe src/nodes/image_view.cpp) +add_dependencies(image_view_exe ${PROJECT_NAME}_gencfg) +SET_TARGET_PROPERTIES(image_view_exe PROPERTIES OUTPUT_NAME image_view) +target_link_libraries(image_view_exe ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} + ${Boost_LIBRARIES} +) + +add_executable(disparity_view src/nodes/disparity_view.cpp) +target_link_libraries(disparity_view ${catkin_LIBRARIES} + ${OpenCV_LIBRARIES} +) + +add_executable(stereo_view src/nodes/stereo_view.cpp) +target_link_libraries(stereo_view ${Boost_LIBRARIES} + ${catkin_LIBRARIES} + ${GTK_LIBRARIES} + ${GTK2_LIBRARIES} + ${OpenCV_LIBRARIES} +) + +install(TARGETS disparity_view image_view_exe stereo_view + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) +install(FILES nodelet_plugins.xml + DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +) + +# Python programs +catkin_install_python( + PROGRAMS scripts/extract_images_sync + DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +) diff --git a/future pkg/image_pipeline/image_view/cfg/ImageView.cfg b/future pkg/image_pipeline/image_view/cfg/ImageView.cfg new file mode 100755 index 0000000..8cc0888 --- /dev/null +++ b/future pkg/image_pipeline/image_view/cfg/ImageView.cfg @@ -0,0 +1,29 @@ +#! 
/usr/bin/env python + +PACKAGE='image_view' +from dynamic_reconfigure.parameter_generator_catkin import * + +gen = ParameterGenerator() + +edit_method_colormap = gen.enum([ + gen.const("NO_COLORMAP", int_t, -1, "NO_COLORMAP"), + gen.const("AUTUMN", int_t, 0, "COLORMAP_AUTUMN"), + gen.const("BONE", int_t, 1, "COLORMAP_BONE"), + gen.const("JET", int_t, 2, "COLORMAP_JET"), + gen.const("WINTER", int_t, 3, "COLORMAP_WINTER"), + gen.const("RAINBOW", int_t, 4, "COLORMAP_RAINBOW"), + gen.const("OCEAN", int_t, 5, "COLORMAP_OCEAN"), + gen.const("SUMMER", int_t, 6, "COLORMAP_SUMMER"), + gen.const("SPRING", int_t, 7, "COLORMAP_SPRING"), + gen.const("COOL", int_t, 8, "COLORMAP_COOL"), + gen.const("HSV", int_t, 9, "COLORMAP_HSV"), + gen.const("PINK", int_t, 10, "COLORMAP_PINK"), + gen.const("HOT", int_t, 11, "COLORMAP_HOT"), +], "colormap") + +gen.add('do_dynamic_scaling', bool_t, 0, 'Do dynamic scaling about pixel values or not', False) +gen.add('colormap', int_t, 0, "colormap", -1, -1, 11, edit_method=edit_method_colormap); +gen.add('min_image_value', double_t, 0, "Minimum image value for scaling depth/float image.", default=0, min=0); +gen.add('max_image_value', double_t, 0, "Maximum image value for scaling depth/float image.", default=0, min=0); + +exit(gen.generate(PACKAGE, 'image_view', 'ImageView')) diff --git a/future pkg/image_pipeline/image_view/mainpage.dox b/future pkg/image_pipeline/image_view/mainpage.dox new file mode 100644 index 0000000..87cfb37 --- /dev/null +++ b/future pkg/image_pipeline/image_view/mainpage.dox @@ -0,0 +1,11 @@ +/** +@mainpage image_view + +@htmlinclude manifest.html + +@b image_view is a simple utility for viewing an image topic. For usage see +http://www.ros.org/wiki/image_view. + +Currently this package has no public code API. + +*/ diff --git a/future pkg/image_pipeline/image_view/nodelet_plugins.xml b/future pkg/image_pipeline/image_view/nodelet_plugins.xml new file mode 100644 index 0000000..b6bd0fc --- /dev/null +++ b/future pkg/image_pipeline/image_view/nodelet_plugins.xml @@ -0,0 +1,11 @@ + + + + Nodelet to view a sensor_msgs/Image topic + + + + Nodelet to view a stereo_msgs/DisparityImage topic + + + diff --git a/future pkg/image_pipeline/image_view/package.xml b/future pkg/image_pipeline/image_view/package.xml new file mode 100644 index 0000000..3463732 --- /dev/null +++ b/future pkg/image_pipeline/image_view/package.xml @@ -0,0 +1,46 @@ + + image_view + 1.12.23 + + A simple viewer for ROS image topics. Includes a specialized viewer + for stereo + disparity images. 
+ + Patrick Mihelich + Vincent Rabaud + BSD + http://www.ros.org/wiki/image_view + + + + + + + catkin + + rostest + + camera_calibration_parsers + cv_bridge + dynamic_reconfigure + gtk2 + image_transport + message_filters + message_generation + nodelet + rosconsole + roscpp + sensor_msgs + std_srvs + stereo_msgs + + camera_calibration_parsers + cv_bridge + dynamic_reconfigure + gtk2 + image_transport + message_filters + nodelet + rosconsole + roscpp + std_srvs + diff --git a/future pkg/image_pipeline/image_view/rosdoc.yaml b/future pkg/image_pipeline/image_view/rosdoc.yaml new file mode 100644 index 0000000..976fdf0 --- /dev/null +++ b/future pkg/image_pipeline/image_view/rosdoc.yaml @@ -0,0 +1,4 @@ + - builder: doxygen + name: C++ API + output_dir: c++ + file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox' diff --git a/future pkg/image_pipeline/image_view/scripts/extract_images_sync b/future pkg/image_pipeline/image_view/scripts/extract_images_sync new file mode 100755 index 0000000..cd86ddd --- /dev/null +++ b/future pkg/image_pipeline/image_view/scripts/extract_images_sync @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Software License Agreement (BSD License) +# +# Copyright (c) 2015, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Save images of multiple topics with timestamp synchronization. + +Usage: rosrun image_view extract_images_sync _inputs:='[, ]' +""" + +import sys + +import cv2 + +import cv_bridge +import message_filters +import rospy +from sensor_msgs.msg import Image + + +class ExtractImagesSync(object): + + def __init__(self): + self.seq = 0 + self.fname_fmt = rospy.get_param( + '~filename_format', 'frame%04i_%i.jpg') + self.do_dynamic_scaling = rospy.get_param( + '~do_dynamic_scaling', False) + img_topics = rospy.get_param('~inputs', None) + if img_topics is None: + rospy.logwarn("""\ +extract_images_sync: rosparam '~inputs' has not been specified! 
\ +Typical command-line usage: +\t$ rosrun image_view extract_images_sync _inputs:= +\t$ rosrun image_view extract_images_sync \ +_inputs:='[, ]'""") + sys.exit(1) + if not isinstance(img_topics, list): + img_topics = [img_topics] + subs = [] + for t in img_topics: + subs.append(message_filters.Subscriber(t, Image)) + if rospy.get_param('~approximate_sync', False): + sync = message_filters.ApproximateTimeSynchronizer( + subs, queue_size=100, slop=.1) + else: + sync = message_filters.TimeSynchronizer( + subs, queue_size=100) + sync.registerCallback(self.save) + + def save(self, *imgmsgs): + seq = self.seq + bridge = cv_bridge.CvBridge() + for i, imgmsg in enumerate(imgmsgs): + img = bridge.imgmsg_to_cv2(imgmsg) + channels = img.shape[2] if img.ndim == 3 else 1 + encoding_in = bridge.dtype_with_channels_to_cvtype2( + img.dtype, channels) + img = cv_bridge.cvtColorForDisplay( + img, encoding_in=encoding_in, encoding_out='', + do_dynamic_scaling=self.do_dynamic_scaling) + fname = self.fname_fmt % (seq, i) + print('Save image as {0}'.format(fname)) + cv2.imwrite(fname, img) + self.seq = seq + 1 + + +if __name__ == '__main__': + rospy.init_node('extract_images_sync') + extractor = ExtractImagesSync() + rospy.spin() diff --git a/future pkg/image_pipeline/image_view/src/nodelets/disparity_nodelet.cpp b/future pkg/image_pipeline/image_view/src/nodelets/disparity_nodelet.cpp new file mode 100644 index 0000000..e177a87 --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodelets/disparity_nodelet.cpp @@ -0,0 +1,439 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#include +#include +#include +#include +#include "window_thread.h" + +#ifdef HAVE_GTK +#include + +// Platform-specific workaround for #3026: image_view doesn't close when +// closing image window. 
On platforms using GTK+ we connect this to the +// window's "destroy" event so that image_view exits. +static void destroyNode(GtkWidget *widget, gpointer data) +{ + exit(0); +} + +static void destroyNodelet(GtkWidget *widget, gpointer data) +{ + // We can't actually unload the nodelet from here, but we can at least + // unsubscribe from the image topic. + reinterpret_cast(data)->shutdown(); +} +#endif + + +namespace image_view { + +class DisparityNodelet : public nodelet::Nodelet +{ + // colormap for disparities, RGB order + static unsigned char colormap[]; + + std::string window_name_; + ros::Subscriber sub_; + cv::Mat_ disparity_color_; + bool initialized; + + virtual void onInit(); + + void imageCb(const stereo_msgs::DisparityImageConstPtr& msg); + +public: + ~DisparityNodelet(); +}; + +DisparityNodelet::~DisparityNodelet() +{ + cv::destroyWindow(window_name_); +} + +void DisparityNodelet::onInit() +{ + initialized = false; + ros::NodeHandle nh = getNodeHandle(); + ros::NodeHandle local_nh = getPrivateNodeHandle(); + const std::vector& argv = getMyArgv(); + + // Internal option, should be used only by image_view nodes + bool shutdown_on_close = std::find(argv.begin(), argv.end(), + "--shutdown-on-close") != argv.end(); + + // Default window name is the resolved topic name + std::string topic = nh.resolveName("image"); + local_nh.param("window_name", window_name_, topic); + + bool autosize; + local_nh.param("autosize", autosize, false); + + //cv::namedWindow(window_name_, autosize ? cv::WND_PROP_AUTOSIZE : 0); +#if CV_MAJOR_VERSION ==2 +#ifdef HAVE_GTK + // Register appropriate handler for when user closes the display window + GtkWidget *widget = GTK_WIDGET( cvGetWindowHandle(window_name_.c_str()) ); + if (shutdown_on_close) + g_signal_connect(widget, "destroy", G_CALLBACK(destroyNode), NULL); + else + g_signal_connect(widget, "destroy", G_CALLBACK(destroyNodelet), &sub_); +#endif + // Start the OpenCV window thread so we don't have to waitKey() somewhere + startWindowThread(); +#endif + + sub_ = nh.subscribe(topic, 1, &DisparityNodelet::imageCb, this); +} + +void DisparityNodelet::imageCb(const stereo_msgs::DisparityImageConstPtr& msg) +{ + // Check for common errors in input + if (msg->min_disparity == 0.0 && msg->max_disparity == 0.0) + { + NODELET_ERROR_THROTTLE(30, "Disparity image fields min_disparity and " + "max_disparity are not set"); + return; + } + if (msg->image.encoding != sensor_msgs::image_encodings::TYPE_32FC1) + { + NODELET_ERROR_THROTTLE(30, "Disparity image must be 32-bit floating point " + "(encoding '32FC1'), but has encoding '%s'", + msg->image.encoding.c_str()); + return; + } + + if(!initialized) { + cv::namedWindow(window_name_, false ? 
cv::WND_PROP_AUTOSIZE : 0); + initialized = true; + } + // Colormap and display the disparity image + float min_disparity = msg->min_disparity; + float max_disparity = msg->max_disparity; + float multiplier = 255.0f / (max_disparity - min_disparity); + + const cv::Mat_ dmat(msg->image.height, msg->image.width, + (float*)&msg->image.data[0], msg->image.step); + disparity_color_.create(msg->image.height, msg->image.width); + + for (int row = 0; row < disparity_color_.rows; ++row) { + const float* d = dmat[row]; + cv::Vec3b *disparity_color = disparity_color_[row], + *disparity_color_end = disparity_color + disparity_color_.cols; + for (; disparity_color < disparity_color_end; ++disparity_color, ++d) { + int index = (*d - min_disparity) * multiplier + 0.5; + index = std::min(255, std::max(0, index)); + // Fill as BGR + (*disparity_color)[2] = colormap[3*index + 0]; + (*disparity_color)[1] = colormap[3*index + 1]; + (*disparity_color)[0] = colormap[3*index + 2]; + } + } + + /// @todo For Electric, consider option to draw outline of valid window +#if 0 + sensor_msgs::RegionOfInterest valid = msg->valid_window; + cv::Point tl(valid.x_offset, valid.y_offset), br(valid.x_offset + valid.width, valid.y_offset + valid.height); + cv::rectangle(disparity_color_, tl, br, CV_RGB(255,0,0), 1); +#endif + + cv::imshow(window_name_, disparity_color_); + cv::waitKey(10); +} + +unsigned char DisparityNodelet::colormap[768] = + { 150, 150, 150, + 107, 0, 12, + 106, 0, 18, + 105, 0, 24, + 103, 0, 30, + 102, 0, 36, + 101, 0, 42, + 99, 0, 48, + 98, 0, 54, + 97, 0, 60, + 96, 0, 66, + 94, 0, 72, + 93, 0, 78, + 92, 0, 84, + 91, 0, 90, + 89, 0, 96, + 88, 0, 102, + 87, 0, 108, + 85, 0, 114, + 84, 0, 120, + 83, 0, 126, + 82, 0, 131, + 80, 0, 137, + 79, 0, 143, + 78, 0, 149, + 77, 0, 155, + 75, 0, 161, + 74, 0, 167, + 73, 0, 173, + 71, 0, 179, + 70, 0, 185, + 69, 0, 191, + 68, 0, 197, + 66, 0, 203, + 65, 0, 209, + 64, 0, 215, + 62, 0, 221, + 61, 0, 227, + 60, 0, 233, + 59, 0, 239, + 57, 0, 245, + 56, 0, 251, + 55, 0, 255, + 54, 0, 255, + 52, 0, 255, + 51, 0, 255, + 50, 0, 255, + 48, 0, 255, + 47, 0, 255, + 46, 0, 255, + 45, 0, 255, + 43, 0, 255, + 42, 0, 255, + 41, 0, 255, + 40, 0, 255, + 38, 0, 255, + 37, 0, 255, + 36, 0, 255, + 34, 0, 255, + 33, 0, 255, + 32, 0, 255, + 31, 0, 255, + 29, 0, 255, + 28, 0, 255, + 27, 0, 255, + 26, 0, 255, + 24, 0, 255, + 23, 0, 255, + 22, 0, 255, + 20, 0, 255, + 19, 0, 255, + 18, 0, 255, + 17, 0, 255, + 15, 0, 255, + 14, 0, 255, + 13, 0, 255, + 11, 0, 255, + 10, 0, 255, + 9, 0, 255, + 8, 0, 255, + 6, 0, 255, + 5, 0, 255, + 4, 0, 255, + 3, 0, 255, + 1, 0, 255, + 0, 4, 255, + 0, 10, 255, + 0, 16, 255, + 0, 22, 255, + 0, 28, 255, + 0, 34, 255, + 0, 40, 255, + 0, 46, 255, + 0, 52, 255, + 0, 58, 255, + 0, 64, 255, + 0, 70, 255, + 0, 76, 255, + 0, 82, 255, + 0, 88, 255, + 0, 94, 255, + 0, 100, 255, + 0, 106, 255, + 0, 112, 255, + 0, 118, 255, + 0, 124, 255, + 0, 129, 255, + 0, 135, 255, + 0, 141, 255, + 0, 147, 255, + 0, 153, 255, + 0, 159, 255, + 0, 165, 255, + 0, 171, 255, + 0, 177, 255, + 0, 183, 255, + 0, 189, 255, + 0, 195, 255, + 0, 201, 255, + 0, 207, 255, + 0, 213, 255, + 0, 219, 255, + 0, 225, 255, + 0, 231, 255, + 0, 237, 255, + 0, 243, 255, + 0, 249, 255, + 0, 255, 255, + 0, 255, 249, + 0, 255, 243, + 0, 255, 237, + 0, 255, 231, + 0, 255, 225, + 0, 255, 219, + 0, 255, 213, + 0, 255, 207, + 0, 255, 201, + 0, 255, 195, + 0, 255, 189, + 0, 255, 183, + 0, 255, 177, + 0, 255, 171, + 0, 255, 165, + 0, 255, 159, + 0, 255, 153, + 0, 255, 147, + 0, 255, 141, + 0, 255, 135, + 0, 255, 129, + 0, 
255, 124, + 0, 255, 118, + 0, 255, 112, + 0, 255, 106, + 0, 255, 100, + 0, 255, 94, + 0, 255, 88, + 0, 255, 82, + 0, 255, 76, + 0, 255, 70, + 0, 255, 64, + 0, 255, 58, + 0, 255, 52, + 0, 255, 46, + 0, 255, 40, + 0, 255, 34, + 0, 255, 28, + 0, 255, 22, + 0, 255, 16, + 0, 255, 10, + 0, 255, 4, + 2, 255, 0, + 8, 255, 0, + 14, 255, 0, + 20, 255, 0, + 26, 255, 0, + 32, 255, 0, + 38, 255, 0, + 44, 255, 0, + 50, 255, 0, + 56, 255, 0, + 62, 255, 0, + 68, 255, 0, + 74, 255, 0, + 80, 255, 0, + 86, 255, 0, + 92, 255, 0, + 98, 255, 0, + 104, 255, 0, + 110, 255, 0, + 116, 255, 0, + 122, 255, 0, + 128, 255, 0, + 133, 255, 0, + 139, 255, 0, + 145, 255, 0, + 151, 255, 0, + 157, 255, 0, + 163, 255, 0, + 169, 255, 0, + 175, 255, 0, + 181, 255, 0, + 187, 255, 0, + 193, 255, 0, + 199, 255, 0, + 205, 255, 0, + 211, 255, 0, + 217, 255, 0, + 223, 255, 0, + 229, 255, 0, + 235, 255, 0, + 241, 255, 0, + 247, 255, 0, + 253, 255, 0, + 255, 251, 0, + 255, 245, 0, + 255, 239, 0, + 255, 233, 0, + 255, 227, 0, + 255, 221, 0, + 255, 215, 0, + 255, 209, 0, + 255, 203, 0, + 255, 197, 0, + 255, 191, 0, + 255, 185, 0, + 255, 179, 0, + 255, 173, 0, + 255, 167, 0, + 255, 161, 0, + 255, 155, 0, + 255, 149, 0, + 255, 143, 0, + 255, 137, 0, + 255, 131, 0, + 255, 126, 0, + 255, 120, 0, + 255, 114, 0, + 255, 108, 0, + 255, 102, 0, + 255, 96, 0, + 255, 90, 0, + 255, 84, 0, + 255, 78, 0, + 255, 72, 0, + 255, 66, 0, + 255, 60, 0, + 255, 54, 0, + 255, 48, 0, + 255, 42, 0, + 255, 36, 0, + 255, 30, 0, + 255, 24, 0, + 255, 18, 0, + 255, 12, 0, + 255, 6, 0, + 255, 0, 0, + }; + +} // namespace image_view + +// Register the nodelet +#include +PLUGINLIB_EXPORT_CLASS( image_view::DisparityNodelet, nodelet::Nodelet) diff --git a/future pkg/image_pipeline/image_view/src/nodelets/image_nodelet.cpp b/future pkg/image_pipeline/image_view/src/nodelets/image_nodelet.cpp new file mode 100644 index 0000000..3456243 --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodelets/image_nodelet.cpp @@ -0,0 +1,228 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#include +#include + +#include +#include +#include "window_thread.h" + +#include +#include + +#ifdef HAVE_GTK +#include + +// Platform-specific workaround for #3026: image_view doesn't close when +// closing image window. On platforms using GTK+ we connect this to the +// window's "destroy" event so that image_view exits. +static void destroyNode(GtkWidget *widget, gpointer data) +{ + /// @todo On ros::shutdown(), the node hangs. Why? + //ros::shutdown(); + exit(0); // brute force solution +} + +static void destroyNodelet(GtkWidget *widget, gpointer data) +{ + // We can't actually unload the nodelet from here, but we can at least + // unsubscribe from the image topic. + reinterpret_cast(data)->shutdown(); +} +#endif + + +namespace image_view { + +class ImageNodelet : public nodelet::Nodelet +{ + image_transport::Subscriber sub_; + + boost::mutex image_mutex_; + cv::Mat last_image_; + + std::string window_name_; + bool autosize_; + boost::format filename_format_; + int count_; + bool initialized_; + + virtual void onInit(); + + void imageCb(const sensor_msgs::ImageConstPtr& msg); + + static void mouseCb(int event, int x, int y, int flags, void* param); + +public: + ImageNodelet(); + + ~ImageNodelet(); +}; + +ImageNodelet::ImageNodelet() + : filename_format_(""), count_(0), initialized_(false) +{ +} + +ImageNodelet::~ImageNodelet() +{ + cv::destroyWindow(window_name_); +} + +void ImageNodelet::onInit() +{ + ros::NodeHandle nh = getNodeHandle(); + ros::NodeHandle local_nh = getPrivateNodeHandle(); + + // Command line argument parsing + const std::vector& argv = getMyArgv(); + // First positional argument is the transport type + std::string transport; + local_nh.param("image_transport", transport, std::string("raw")); + for (int i = 0; i < (int)argv.size(); ++i) + { + if (argv[i][0] != '-') + { + transport = argv[i]; + break; + } + } + NODELET_INFO_STREAM("Using transport \"" << transport << "\""); + // Internal option, should be used only by the image_view node + bool shutdown_on_close = std::find(argv.begin(), argv.end(), + "--shutdown-on-close") != argv.end(); + + // Default window name is the resolved topic name + std::string topic = nh.resolveName("image"); + local_nh.param("window_name", window_name_, topic); + + local_nh.param("autosize", autosize_, false); + + std::string format_string; + local_nh.param("filename_format", format_string, std::string("frame%04i.jpg")); + filename_format_.parse(format_string); + +#ifdef HAVE_GTK + // Register appropriate handler for when user closes the display window + GtkWidget *widget = GTK_WIDGET( cvGetWindowHandle(window_name_.c_str()) ); + if (shutdown_on_close) + g_signal_connect(widget, "destroy", G_CALLBACK(destroyNode), NULL); + else + g_signal_connect(widget, "destroy", G_CALLBACK(destroyNodelet), &sub_); +#endif + + // Start the OpenCV window thread so we don't have to waitKey() somewhere + startWindowThread(); + + 
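+  // Note (added comment; assumes the standard image_transport plugin mechanism):
+  // the subscription below passes TransportHints so the transport plugin
+  // (raw, compressed, theora, ...) comes from the first positional argument
+  // or, failing that, from the '~image_transport' parameter parsed above,
+  // e.g.  rosrun image_view image_view image:=/camera/image_raw compressed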
image_transport::ImageTransport it(nh); + image_transport::TransportHints hints(transport, ros::TransportHints(), getPrivateNodeHandle()); + sub_ = it.subscribe(topic, 1, &ImageNodelet::imageCb, this, hints); +} + +void ImageNodelet::imageCb(const sensor_msgs::ImageConstPtr& msg) +{ + if ( ! initialized_ ) { + cv::namedWindow(window_name_, autosize_ ? cv::WND_PROP_AUTOSIZE : 0); + cv::setMouseCallback(window_name_, &ImageNodelet::mouseCb, this); + initialized_ = true; + } + + image_mutex_.lock(); + + // We want to scale floating point images so that they display nicely + bool do_dynamic_scaling = (msg->encoding.find("F") != std::string::npos); + + // Convert to OpenCV native BGR color + try { + cv_bridge::CvtColorForDisplayOptions options; + options.do_dynamic_scaling = do_dynamic_scaling; + last_image_ = cvtColorForDisplay(cv_bridge::toCvShare(msg), "", options)->image; + } + catch (cv_bridge::Exception& e) { + NODELET_ERROR_THROTTLE(30, "Unable to convert '%s' image for display: '%s'", + msg->encoding.c_str(), e.what()); + } + + // Must release the mutex before calling cv::imshow, or can deadlock against + // OpenCV's window mutex. + image_mutex_.unlock(); + if (!last_image_.empty()) { + cv::imshow(window_name_, last_image_); + cv::waitKey(1); + } +} + +void ImageNodelet::mouseCb(int event, int x, int y, int flags, void* param) +{ + ImageNodelet *this_ = reinterpret_cast(param); + // Trick to use NODELET_* logging macros in static function + boost::function getName = + boost::bind(&ImageNodelet::getName, this_); + + if (event == cv::EVENT_LBUTTONDOWN) + { + NODELET_WARN_ONCE("Left-clicking no longer saves images. Right-click instead."); + return; + } + if (event != cv::EVENT_RBUTTONDOWN) + return; + + boost::lock_guard guard(this_->image_mutex_); + + const cv::Mat &image = this_->last_image_; + if (image.empty()) + { + NODELET_WARN("Couldn't save image, no data!"); + return; + } + + std::string filename = (this_->filename_format_ % this_->count_).str(); + if (cv::imwrite(filename, image)) + { + NODELET_INFO("Saved image %s", filename.c_str()); + this_->count_++; + } + else + { + /// @todo Show full path, ask if user has permission to write there + NODELET_ERROR("Failed to save image."); + } +} + +} // namespace image_view + +// Register the nodelet +#include +PLUGINLIB_EXPORT_CLASS( image_view::ImageNodelet, nodelet::Nodelet) diff --git a/future pkg/image_pipeline/image_view/src/nodelets/window_thread.cpp b/future pkg/image_pipeline/image_view/src/nodelets/window_thread.cpp new file mode 100644 index 0000000..8e1c8eb --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodelets/window_thread.cpp @@ -0,0 +1,52 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. 
+* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include "window_thread.h" +#include +#include + +namespace { +void startWindowThreadLocal() { + cv::startWindowThread(); +} +} + +namespace image_view { + +void startWindowThread() +{ + static boost::once_flag cv_thread_flag = BOOST_ONCE_INIT; + boost::call_once(&startWindowThreadLocal, cv_thread_flag); +} + +} // namespace image_view diff --git a/future pkg/image_pipeline/image_view/src/nodelets/window_thread.h b/future pkg/image_pipeline/image_view/src/nodelets/window_thread.h new file mode 100644 index 0000000..9418e79 --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodelets/window_thread.h @@ -0,0 +1,44 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#ifndef IMAGE_VIEW_WINDOW_THREAD_H +#define IMAGE_VIEW_WINDOW_THREAD_H + +namespace image_view { + +// Makes absolutely sure we only start the OpenCV window thread once +void startWindowThread(); + +} // namespace image_view + +#endif diff --git a/future pkg/image_pipeline/image_view/src/nodes/disparity_view.cpp b/future pkg/image_pipeline/image_view/src/nodes/disparity_view.cpp new file mode 100644 index 0000000..d013b22 --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodes/disparity_view.cpp @@ -0,0 +1,54 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#include + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "disparity_view", ros::init_options::AnonymousName); + if (ros::names::remap("image") == "image") { + ROS_WARN("Topic 'image' has not been remapped! Typical command-line usage:\n" + "\t$ rosrun image_view disparity_view image:="); + } + + nodelet::Loader manager(false); + nodelet::M_string remappings; + nodelet::V_string my_argv(argv + 1, argv + argc); + my_argv.push_back("--shutdown-on-close"); // Internal + + manager.load(ros::this_node::getName(), "image_view/disparity", remappings, my_argv); + + ros::spin(); + return 0; +} diff --git a/future pkg/image_pipeline/image_view/src/nodes/extract_images.cpp b/future pkg/image_pipeline/image_view/src/nodes/extract_images.cpp new file mode 100644 index 0000000..9ebdd48 --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodes/extract_images.cpp @@ -0,0 +1,154 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. 
+* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ + +#include + +#include +#include +#include +#include + +#include +#include + +class ExtractImages +{ +private: + image_transport::Subscriber sub_; + + sensor_msgs::ImageConstPtr last_msg_; + boost::mutex image_mutex_; + + std::string window_name_; + boost::format filename_format_; + int count_; + double _time; + double sec_per_frame_; + +#if defined(_VIDEO) + CvVideoWriter* video_writer; +#endif //_VIDEO + +public: + ExtractImages(const ros::NodeHandle& nh, const std::string& transport) + : filename_format_(""), count_(0), _time(ros::Time::now().toSec()) + { + std::string topic = nh.resolveName("image"); + ros::NodeHandle local_nh("~"); + + std::string format_string; + local_nh.param("filename_format", format_string, std::string("frame%04i.jpg")); + filename_format_.parse(format_string); + + local_nh.param("sec_per_frame", sec_per_frame_, 0.1); + + image_transport::ImageTransport it(nh); + sub_ = it.subscribe(topic, 1, &ExtractImages::image_cb, this, transport); + +#if defined(_VIDEO) + video_writer = 0; +#endif + + ROS_INFO("Initialized sec per frame to %f", sec_per_frame_); + } + + ~ExtractImages() + { + } + + void image_cb(const sensor_msgs::ImageConstPtr& msg) + { + boost::lock_guard guard(image_mutex_); + + // Hang on to message pointer for sake of mouse_cb + last_msg_ = msg; + + // May want to view raw bayer data + // NB: This is hacky, but should be OK since we have only one image CB. 
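+    // Added note: re-labelling a bayer-encoded image as 'mono8' makes
+    // cv_bridge treat it as a plain single-channel image instead of
+    // debayering it, so the raw bayer pattern is what gets written to disk.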
+ if (msg->encoding.find("bayer") != std::string::npos) + boost::const_pointer_cast(msg)->encoding = "mono8"; + + cv::Mat image; + try + { + image = cv_bridge::toCvShare(msg, "bgr8")->image; + } catch(cv_bridge::Exception) + { + ROS_ERROR("Unable to convert %s image to bgr8", msg->encoding.c_str()); + } + + double delay = ros::Time::now().toSec()-_time; + if(delay >= sec_per_frame_) + { + _time = ros::Time::now().toSec(); + + if (!image.empty()) { + std::string filename = (filename_format_ % count_).str(); + +#if !defined(_VIDEO) + cv::imwrite(filename, image); +#else + if(!video_writer) + { + video_writer = cvCreateVideoWriter("video.avi", CV_FOURCC('M','J','P','G'), + int(1.0/sec_per_frame_), cvSize(image->width, image->height)); + } + + cvWriteFrame(video_writer, image); +#endif // _VIDEO + + ROS_INFO("Saved image %s", filename.c_str()); + count_++; + } else { + ROS_WARN("Couldn't save image, no data!"); + } + } + } +}; + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "extract_images", ros::init_options::AnonymousName); + ros::NodeHandle n; + if (n.resolveName("image") == "/image") { + ROS_WARN("extract_images: image has not been remapped! Typical command-line usage:\n" + "\t$ ./extract_images image:= [transport]"); + } + + ExtractImages view(n, (argc > 1) ? argv[1] : "raw"); + + ros::spin(); + + return 0; +} diff --git a/future pkg/image_pipeline/image_view/src/nodes/image_saver.cpp b/future pkg/image_pipeline/image_view/src/nodes/image_saver.cpp new file mode 100644 index 0000000..d3509c3 --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodes/image_saver.cpp @@ -0,0 +1,226 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. 
+*********************************************************************/ +#include + +#include +#include +#include +#include +#include + +#include +#include + +boost::format g_format; +bool save_all_image, save_image_service; +std::string encoding; +bool request_start_end; + + +bool service(std_srvs::Empty::Request &req, std_srvs::Empty::Response &res) { + save_image_service = true; + return true; +} + +/** Class to deal with which callback to call whether we have CameraInfo or not + */ +class Callbacks { +public: + Callbacks() : is_first_image_(true), has_camera_info_(false), count_(0) { + } + + bool callbackStartSave(std_srvs::Trigger::Request &req, + std_srvs::Trigger::Response &res) + { + ROS_INFO("Received start saving request"); + start_time_ = ros::Time::now(); + end_time_ = ros::Time(0); + + res.success = true; + return true; + } + + bool callbackEndSave(std_srvs::Trigger::Request &req, + std_srvs::Trigger::Response &res) + { + ROS_INFO("Received end saving request"); + end_time_ = ros::Time::now(); + + res.success = true; + return true; + } + + void callbackWithoutCameraInfo(const sensor_msgs::ImageConstPtr& image_msg) + { + if (is_first_image_) { + is_first_image_ = false; + + // Wait a tiny bit to see whether callbackWithCameraInfo is called + ros::Duration(0.001).sleep(); + } + + if (has_camera_info_) + return; + + // saving flag priority: + // 1. request by service. + // 2. request by topic about start and end. + // 3. flag 'save_all_image'. + if (!save_image_service && request_start_end) { + if (start_time_ == ros::Time(0)) + return; + else if (start_time_ > image_msg->header.stamp) + return; // wait for message which comes after start_time + else if ((end_time_ != ros::Time(0)) && (end_time_ < image_msg->header.stamp)) + return; // skip message which comes after end_time + } + + // save the image + std::string filename; + if (!saveImage(image_msg, filename)) + return; + + count_++; + } + + void callbackWithCameraInfo(const sensor_msgs::ImageConstPtr& image_msg, const sensor_msgs::CameraInfoConstPtr& info) + { + has_camera_info_ = true; + + if (!save_image_service && request_start_end) { + if (start_time_ == ros::Time(0)) + return; + else if (start_time_ > image_msg->header.stamp) + return; // wait for message which comes after start_time + else if ((end_time_ != ros::Time(0)) && (end_time_ < image_msg->header.stamp)) + return; // skip message which comes after end_time + } + + // save the image + std::string filename; + if (!saveImage(image_msg, filename)) + return; + + // save the CameraInfo + if (info) { + filename = filename.replace(filename.rfind("."), filename.length(), ".ini"); + camera_calibration_parsers::writeCalibration(filename, "camera", *info); + } + + count_++; + } +private: + bool saveImage(const sensor_msgs::ImageConstPtr& image_msg, std::string &filename) { + cv::Mat image; + try + { + image = cv_bridge::toCvShare(image_msg, encoding)->image; + } catch(cv_bridge::Exception) + { + ROS_ERROR("Unable to convert %s image to %s", image_msg->encoding.c_str(), encoding.c_str()); + return false; + } + + if (!image.empty()) { + try { + filename = (g_format).str(); + } catch (...) { g_format.clear(); } + try { + filename = (g_format % count_).str(); + } catch (...) { g_format.clear(); } + try { + filename = (g_format % count_ % "jpg").str(); + } catch (...) 
{ g_format.clear(); } + + if ( save_all_image || save_image_service ) { + cv::imwrite(filename, image); + ROS_INFO("Saved image %s", filename.c_str()); + + save_image_service = false; + } else { + return false; + } + } else { + ROS_WARN("Couldn't save image, no data!"); + return false; + } + return true; + } + +private: + bool is_first_image_; + bool has_camera_info_; + size_t count_; + ros::Time start_time_; + ros::Time end_time_; +}; + +int main(int argc, char** argv) +{ + ros::init(argc, argv, "image_saver", ros::init_options::AnonymousName); + ros::NodeHandle nh; + image_transport::ImageTransport it(nh); + std::string topic = nh.resolveName("image"); + + Callbacks callbacks; + // Useful when CameraInfo is being published + image_transport::CameraSubscriber sub_image_and_camera = it.subscribeCamera(topic, 1, + &Callbacks::callbackWithCameraInfo, + &callbacks); + // Useful when CameraInfo is not being published + image_transport::Subscriber sub_image = it.subscribe( + topic, 1, boost::bind(&Callbacks::callbackWithoutCameraInfo, &callbacks, _1)); + + ros::NodeHandle local_nh("~"); + std::string format_string; + local_nh.param("filename_format", format_string, std::string("left%04i.%s")); + local_nh.param("encoding", encoding, std::string("bgr8")); + local_nh.param("save_all_image", save_all_image, true); + local_nh.param("request_start_end", request_start_end, false); + g_format.parse(format_string); + ros::ServiceServer save = local_nh.advertiseService ("save", service); + + if (request_start_end && !save_all_image) + ROS_WARN("'request_start_end' is true, so overwriting 'save_all_image' as true"); + + // FIXME(unkown): This does not make services appear + // if (request_start_end) { + ros::ServiceServer srv_start = local_nh.advertiseService( + "start", &Callbacks::callbackStartSave, &callbacks); + ros::ServiceServer srv_end = local_nh.advertiseService( + "end", &Callbacks::callbackEndSave, &callbacks); + // } + + ros::spin(); +} diff --git a/future pkg/image_pipeline/image_view/src/nodes/image_view.cpp b/future pkg/image_pipeline/image_view/src/nodes/image_view.cpp new file mode 100644 index 0000000..fd727ac --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodes/image_view.cpp @@ -0,0 +1,212 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include + +int g_count; +cv::Mat g_last_image; +boost::format g_filename_format; +boost::mutex g_image_mutex; +std::string g_window_name; +bool g_gui; +ros::Publisher g_pub; +bool g_do_dynamic_scaling; +int g_colormap; +double g_min_image_value; +double g_max_image_value; + +void reconfigureCb(image_view::ImageViewConfig &config, uint32_t level) +{ + boost::mutex::scoped_lock lock(g_image_mutex); + g_do_dynamic_scaling = config.do_dynamic_scaling; + g_colormap = config.colormap; + g_min_image_value = config.min_image_value; + g_max_image_value = config.max_image_value; +} + +void imageCb(const sensor_msgs::ImageConstPtr& msg) +{ + boost::mutex::scoped_lock lock(g_image_mutex); + + // Convert to OpenCV native BGR color + cv_bridge::CvImageConstPtr cv_ptr; + try { + cv_bridge::CvtColorForDisplayOptions options; + options.do_dynamic_scaling = g_do_dynamic_scaling; + options.colormap = g_colormap; + // Set min/max value for scaling to visualize depth/float image. + if (g_min_image_value == g_max_image_value) { + // Not specified by rosparam, then set default value. + // Because of current sensor limitation, we use 10m as default of max range of depth + // with consistency to the configuration in rqt_image_view. + options.min_image_value = 0; + if (msg->encoding == "32FC1") { + options.max_image_value = 10; // 10 [m] + } else if (msg->encoding == "16UC1") { + options.max_image_value = 10 * 1000; // 10 * 1000 [mm] + } + } else { + options.min_image_value = g_min_image_value; + options.max_image_value = g_max_image_value; + } + cv_ptr = cv_bridge::cvtColorForDisplay(cv_bridge::toCvShare(msg), "", options); + g_last_image = cv_ptr->image; + } catch (cv_bridge::Exception& e) { + ROS_ERROR_THROTTLE(30, "Unable to convert '%s' image for display: '%s'", + msg->encoding.c_str(), e.what()); + } + if (g_gui && !g_last_image.empty()) { + const cv::Mat &image = g_last_image; + cv::imshow(g_window_name, image); + cv::waitKey(3); + } + if (g_pub.getNumSubscribers() > 0) { + g_pub.publish(cv_ptr); + } +} + +static void mouseCb(int event, int x, int y, int flags, void* param) +{ + if (event == cv::EVENT_LBUTTONDOWN) { + ROS_WARN_ONCE("Left-clicking no longer saves images. Right-click instead."); + return; + } else if (event != cv::EVENT_RBUTTONDOWN) { + return; + } + + boost::mutex::scoped_lock lock(g_image_mutex); + + const cv::Mat &image = g_last_image; + + if (image.empty()) { + ROS_WARN("Couldn't save image, no data!"); + return; + } + + std::string filename = (g_filename_format % g_count).str(); + if (cv::imwrite(filename, image)) { + ROS_INFO("Saved image %s", filename.c_str()); + g_count++; + } else { + boost::filesystem::path full_path = boost::filesystem::complete(filename); + ROS_ERROR_STREAM("Failed to save image. 
Have permission to write there?: " << full_path); + } +} + + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "image_view", ros::init_options::AnonymousName); + if (ros::names::remap("image") == "image") { + ROS_WARN("Topic 'image' has not been remapped! Typical command-line usage:\n" + "\t$ rosrun image_view image_view image:= [transport]"); + } + + ros::NodeHandle nh; + ros::NodeHandle local_nh("~"); + + // Default window name is the resolved topic name + std::string topic = nh.resolveName("image"); + local_nh.param("window_name", g_window_name, topic); + local_nh.param("gui", g_gui, true); // gui/no_gui mode + + if (g_gui) { + std::string format_string; + local_nh.param("filename_format", format_string, std::string("frame%04i.jpg")); + g_filename_format.parse(format_string); + + // Handle window size + bool autosize; + local_nh.param("autosize", autosize, false); + cv::namedWindow(g_window_name, autosize ? (cv::WINDOW_AUTOSIZE | cv::WINDOW_KEEPRATIO | cv::WINDOW_GUI_EXPANDED) : 0); + cv::setMouseCallback(g_window_name, &mouseCb); + + if(autosize == false) + { + if(local_nh.hasParam("width") && local_nh.hasParam("height")) + { + int width; + local_nh.getParam("width", width); + int height; + local_nh.getParam("height", height); + cv::resizeWindow(g_window_name, width, height); + } + } + + // Start the OpenCV window thread so we don't have to waitKey() somewhere + cv::startWindowThread(); + } + + // Handle transport + // priority: + // 1. command line argument + // 2. rosparam '~image_transport' + std::string transport; + local_nh.param("image_transport", transport, std::string("raw")); + ros::V_string myargv; + ros::removeROSArgs(argc, argv, myargv); + for (size_t i = 1; i < myargv.size(); ++i) { + if (myargv[i][0] != '-') { + transport = myargv[i]; + break; + } + } + ROS_INFO_STREAM("Using transport \"" << transport << "\""); + image_transport::ImageTransport it(nh); + image_transport::TransportHints hints(transport, ros::TransportHints(), local_nh); + image_transport::Subscriber sub = it.subscribe(topic, 1, imageCb, hints); + g_pub = local_nh.advertise("output", 1); + + dynamic_reconfigure::Server srv; + dynamic_reconfigure::Server::CallbackType f = + boost::bind(&reconfigureCb, _1, _2); + srv.setCallback(f); + + ros::spin(); + + if (g_gui) { + cv::destroyWindow(g_window_name); + } + return 0; +} diff --git a/future pkg/image_pipeline/image_view/src/nodes/stereo_view.cpp b/future pkg/image_pipeline/image_view/src/nodes/stereo_view.cpp new file mode 100644 index 0000000..64a01cc --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodes/stereo_view.cpp @@ -0,0 +1,573 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. 
+* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#ifdef HAVE_GTK +#include + +// Platform-specific workaround for #3026: image_view doesn't close when +// closing image window. On platforms using GTK+ we connect this to the +// window's "destroy" event so that image_view exits. +static void destroy(GtkWidget *widget, gpointer data) +{ + ros::shutdown(); +} +#endif + +namespace enc = sensor_msgs::image_encodings; + +// colormap for disparities, RGB +static unsigned char colormap[768] = + { 150, 150, 150, + 107, 0, 12, + 106, 0, 18, + 105, 0, 24, + 103, 0, 30, + 102, 0, 36, + 101, 0, 42, + 99, 0, 48, + 98, 0, 54, + 97, 0, 60, + 96, 0, 66, + 94, 0, 72, + 93, 0, 78, + 92, 0, 84, + 91, 0, 90, + 89, 0, 96, + 88, 0, 102, + 87, 0, 108, + 85, 0, 114, + 84, 0, 120, + 83, 0, 126, + 82, 0, 131, + 80, 0, 137, + 79, 0, 143, + 78, 0, 149, + 77, 0, 155, + 75, 0, 161, + 74, 0, 167, + 73, 0, 173, + 71, 0, 179, + 70, 0, 185, + 69, 0, 191, + 68, 0, 197, + 66, 0, 203, + 65, 0, 209, + 64, 0, 215, + 62, 0, 221, + 61, 0, 227, + 60, 0, 233, + 59, 0, 239, + 57, 0, 245, + 56, 0, 251, + 55, 0, 255, + 54, 0, 255, + 52, 0, 255, + 51, 0, 255, + 50, 0, 255, + 48, 0, 255, + 47, 0, 255, + 46, 0, 255, + 45, 0, 255, + 43, 0, 255, + 42, 0, 255, + 41, 0, 255, + 40, 0, 255, + 38, 0, 255, + 37, 0, 255, + 36, 0, 255, + 34, 0, 255, + 33, 0, 255, + 32, 0, 255, + 31, 0, 255, + 29, 0, 255, + 28, 0, 255, + 27, 0, 255, + 26, 0, 255, + 24, 0, 255, + 23, 0, 255, + 22, 0, 255, + 20, 0, 255, + 19, 0, 255, + 18, 0, 255, + 17, 0, 255, + 15, 0, 255, + 14, 0, 255, + 13, 0, 255, + 11, 0, 255, + 10, 0, 255, + 9, 0, 255, + 8, 0, 255, + 6, 0, 255, + 5, 0, 255, + 4, 0, 255, + 3, 0, 255, + 1, 0, 255, + 0, 4, 255, + 0, 10, 255, + 0, 16, 255, + 0, 22, 255, + 0, 28, 255, + 0, 34, 255, + 0, 40, 255, + 0, 46, 255, + 0, 52, 255, + 0, 58, 255, + 0, 64, 255, + 0, 70, 255, + 0, 76, 255, + 0, 82, 255, + 0, 88, 255, + 0, 94, 255, + 0, 100, 255, + 0, 106, 255, + 0, 112, 255, + 0, 118, 255, + 0, 124, 255, + 0, 129, 255, + 0, 135, 255, + 0, 141, 255, + 0, 147, 255, + 0, 153, 255, + 0, 159, 255, + 0, 165, 255, + 0, 171, 255, + 0, 177, 255, + 0, 183, 255, + 0, 189, 255, + 0, 195, 255, + 0, 201, 255, + 0, 207, 255, + 0, 213, 255, + 0, 219, 255, + 0, 225, 255, + 0, 231, 255, + 0, 237, 255, + 0, 243, 255, + 0, 249, 255, + 0, 255, 255, + 0, 255, 249, + 0, 255, 243, + 0, 255, 237, + 0, 255, 231, + 0, 255, 225, + 0, 255, 219, + 0, 255, 213, + 0, 255, 207, + 0, 255, 201, + 0, 255, 195, + 0, 255, 189, + 0, 255, 183, + 0, 255, 177, + 0, 255, 171, + 0, 255, 165, + 0, 255, 159, + 0, 255, 153, + 0, 255, 147, + 0, 255, 141, + 0, 255, 135, 
+ 0, 255, 129, + 0, 255, 124, + 0, 255, 118, + 0, 255, 112, + 0, 255, 106, + 0, 255, 100, + 0, 255, 94, + 0, 255, 88, + 0, 255, 82, + 0, 255, 76, + 0, 255, 70, + 0, 255, 64, + 0, 255, 58, + 0, 255, 52, + 0, 255, 46, + 0, 255, 40, + 0, 255, 34, + 0, 255, 28, + 0, 255, 22, + 0, 255, 16, + 0, 255, 10, + 0, 255, 4, + 2, 255, 0, + 8, 255, 0, + 14, 255, 0, + 20, 255, 0, + 26, 255, 0, + 32, 255, 0, + 38, 255, 0, + 44, 255, 0, + 50, 255, 0, + 56, 255, 0, + 62, 255, 0, + 68, 255, 0, + 74, 255, 0, + 80, 255, 0, + 86, 255, 0, + 92, 255, 0, + 98, 255, 0, + 104, 255, 0, + 110, 255, 0, + 116, 255, 0, + 122, 255, 0, + 128, 255, 0, + 133, 255, 0, + 139, 255, 0, + 145, 255, 0, + 151, 255, 0, + 157, 255, 0, + 163, 255, 0, + 169, 255, 0, + 175, 255, 0, + 181, 255, 0, + 187, 255, 0, + 193, 255, 0, + 199, 255, 0, + 205, 255, 0, + 211, 255, 0, + 217, 255, 0, + 223, 255, 0, + 229, 255, 0, + 235, 255, 0, + 241, 255, 0, + 247, 255, 0, + 253, 255, 0, + 255, 251, 0, + 255, 245, 0, + 255, 239, 0, + 255, 233, 0, + 255, 227, 0, + 255, 221, 0, + 255, 215, 0, + 255, 209, 0, + 255, 203, 0, + 255, 197, 0, + 255, 191, 0, + 255, 185, 0, + 255, 179, 0, + 255, 173, 0, + 255, 167, 0, + 255, 161, 0, + 255, 155, 0, + 255, 149, 0, + 255, 143, 0, + 255, 137, 0, + 255, 131, 0, + 255, 126, 0, + 255, 120, 0, + 255, 114, 0, + 255, 108, 0, + 255, 102, 0, + 255, 96, 0, + 255, 90, 0, + 255, 84, 0, + 255, 78, 0, + 255, 72, 0, + 255, 66, 0, + 255, 60, 0, + 255, 54, 0, + 255, 48, 0, + 255, 42, 0, + 255, 36, 0, + 255, 30, 0, + 255, 24, 0, + 255, 18, 0, + 255, 12, 0, + 255, 6, 0, + 255, 0, 0, + }; + +inline void increment(int* value) +{ + ++(*value); +} + +using namespace sensor_msgs; +using namespace stereo_msgs; +using namespace message_filters::sync_policies; + +// Note: StereoView is NOT nodelet-based, as it synchronizes the three streams. +class StereoView +{ +private: + image_transport::SubscriberFilter left_sub_, right_sub_; + message_filters::Subscriber disparity_sub_; + typedef ExactTime ExactPolicy; + typedef ApproximateTime ApproximatePolicy; + typedef message_filters::Synchronizer ExactSync; + typedef message_filters::Synchronizer ApproximateSync; + boost::shared_ptr exact_sync_; + boost::shared_ptr approximate_sync_; + int queue_size_; + + ImageConstPtr last_left_msg_, last_right_msg_; + cv::Mat last_left_image_, last_right_image_; + cv::Mat_ disparity_color_; + boost::mutex image_mutex_; + + boost::format filename_format_; + int save_count_; + + ros::WallTimer check_synced_timer_; + int left_received_, right_received_, disp_received_, all_received_; + +public: + StereoView(const std::string& transport) + : filename_format_(""), save_count_(0), + left_received_(0), right_received_(0), disp_received_(0), all_received_(0) + { + // Read local parameters + ros::NodeHandle local_nh("~"); + bool autosize; + local_nh.param("autosize", autosize, true); + + std::string format_string; + local_nh.param("filename_format", format_string, std::string("%s%04i.jpg")); + filename_format_.parse(format_string); + + // Do GUI window setup + int flags = autosize ? 
cv::WND_PROP_AUTOSIZE : 0; + cv::namedWindow("left", flags); + cv::namedWindow("right", flags); + cv::namedWindow("disparity", flags); + cv::setMouseCallback("left", &StereoView::mouseCb, this); + cv::setMouseCallback("right", &StereoView::mouseCb, this); + cv::setMouseCallback("disparity", &StereoView::mouseCb, this); +#if CV_MAJOR_VERSION == 2 +#ifdef HAVE_GTK + g_signal_connect(GTK_WIDGET( cvGetWindowHandle("left") ), + "destroy", G_CALLBACK(destroy), NULL); + g_signal_connect(GTK_WIDGET( cvGetWindowHandle("right") ), + "destroy", G_CALLBACK(destroy), NULL); + g_signal_connect(GTK_WIDGET( cvGetWindowHandle("disparity") ), + "destroy", G_CALLBACK(destroy), NULL); +#endif + cvStartWindowThread(); +#endif + + // Resolve topic names + ros::NodeHandle nh; + std::string stereo_ns = nh.resolveName("stereo"); + std::string left_topic = ros::names::clean(stereo_ns + "/left/" + nh.resolveName("image")); + std::string right_topic = ros::names::clean(stereo_ns + "/right/" + nh.resolveName("image")); + std::string disparity_topic = ros::names::clean(stereo_ns + "/disparity"); + ROS_INFO("Subscribing to:\n\t* %s\n\t* %s\n\t* %s", left_topic.c_str(), right_topic.c_str(), + disparity_topic.c_str()); + + // Subscribe to three input topics. + image_transport::ImageTransport it(nh); + left_sub_.subscribe(it, left_topic, 1, transport); + right_sub_.subscribe(it, right_topic, 1, transport); + disparity_sub_.subscribe(nh, disparity_topic, 1); + + // Complain every 30s if the topics appear unsynchronized + left_sub_.registerCallback(boost::bind(increment, &left_received_)); + right_sub_.registerCallback(boost::bind(increment, &right_received_)); + disparity_sub_.registerCallback(boost::bind(increment, &disp_received_)); + check_synced_timer_ = nh.createWallTimer(ros::WallDuration(15.0), + boost::bind(&StereoView::checkInputsSynchronized, this)); + + // Synchronize input topics. Optionally do approximate synchronization. 
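+    // Added note: ExactTime only matches messages whose header stamps are
+    // identical (typical for hardware-triggered stereo rigs), while
+    // ApproximateTime pairs messages with merely similar stamps, buffering
+    // up to 'queue_size' candidates per topic while it searches for a match.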
+ local_nh.param("queue_size", queue_size_, 5); + bool approx; + local_nh.param("approximate_sync", approx, false); + if (approx) + { + approximate_sync_.reset( new ApproximateSync(ApproximatePolicy(queue_size_), + left_sub_, right_sub_, disparity_sub_) ); + approximate_sync_->registerCallback(boost::bind(&StereoView::imageCb, this, _1, _2, _3)); + } + else + { + exact_sync_.reset( new ExactSync(ExactPolicy(queue_size_), + left_sub_, right_sub_, disparity_sub_) ); + exact_sync_->registerCallback(boost::bind(&StereoView::imageCb, this, _1, _2, _3)); + } + } + + ~StereoView() + { + cv::destroyAllWindows(); + } + + void imageCb(const ImageConstPtr& left, const ImageConstPtr& right, + const DisparityImageConstPtr& disparity_msg) + { + ++all_received_; // For error checking + + image_mutex_.lock(); + + // May want to view raw bayer data + if (left->encoding.find("bayer") != std::string::npos) + boost::const_pointer_cast(left)->encoding = "mono8"; + if (right->encoding.find("bayer") != std::string::npos) + boost::const_pointer_cast(right)->encoding = "mono8"; + + // Hang on to image data for sake of mouseCb + last_left_msg_ = left; + last_right_msg_ = right; + try { + last_left_image_ = cv_bridge::toCvShare(left, "bgr8")->image; + last_right_image_ = cv_bridge::toCvShare(right, "bgr8")->image; + } + catch (cv_bridge::Exception& e) { + ROS_ERROR("Unable to convert one of '%s' or '%s' to 'bgr8'", + left->encoding.c_str(), right->encoding.c_str()); + } + + // Colormap and display the disparity image + float min_disparity = disparity_msg->min_disparity; + float max_disparity = disparity_msg->max_disparity; + float multiplier = 255.0f / (max_disparity - min_disparity); + + assert(disparity_msg->image.encoding == enc::TYPE_32FC1); + const cv::Mat_ dmat(disparity_msg->image.height, disparity_msg->image.width, + (float*)&disparity_msg->image.data[0], disparity_msg->image.step); + disparity_color_.create(disparity_msg->image.height, disparity_msg->image.width); + + for (int row = 0; row < disparity_color_.rows; ++row) { + const float* d = dmat[row]; + for (int col = 0; col < disparity_color_.cols; ++col) { + int index = (d[col] - min_disparity) * multiplier + 0.5; + index = std::min(255, std::max(0, index)); + // Fill as BGR + disparity_color_(row, col)[2] = colormap[3*index + 0]; + disparity_color_(row, col)[1] = colormap[3*index + 1]; + disparity_color_(row, col)[0] = colormap[3*index + 2]; + } + } + + // Must release the mutex before calling cv::imshow, or can deadlock against + // OpenCV's window mutex. + image_mutex_.unlock(); + if (!last_left_image_.empty()) { + cv::imshow("left", last_left_image_); + cv::waitKey(1); + } + if (!last_right_image_.empty()) { + cv::imshow("right", last_right_image_); + cv::waitKey(1); + } + cv::imshow("disparity", disparity_color_); + cv::waitKey(1); + } + + void saveImage(const char* prefix, const cv::Mat& image) + { + if (!image.empty()) { + std::string filename = (filename_format_ % prefix % save_count_).str(); + cv::imwrite(filename, image); + ROS_INFO("Saved image %s", filename.c_str()); + } else { + ROS_WARN("Couldn't save %s image, no data!", prefix); + } + } + + static void mouseCb(int event, int x, int y, int flags, void* param) + { + if (event == cv::EVENT_LBUTTONDOWN) + { + ROS_WARN_ONCE("Left-clicking no longer saves images. 
Right-click instead."); + return; + } + if (event != cv::EVENT_RBUTTONDOWN) + return; + + StereoView *sv = (StereoView*)param; + boost::lock_guard guard(sv->image_mutex_); + + sv->saveImage("left", sv->last_left_image_); + sv->saveImage("right", sv->last_right_image_); + sv->saveImage("disp", sv->disparity_color_); + sv->save_count_++; + } + + void checkInputsSynchronized() + { + int threshold = 3 * all_received_; + if (left_received_ >= threshold || right_received_ >= threshold || disp_received_ >= threshold) { + ROS_WARN("[stereo_view] Low number of synchronized left/right/disparity triplets received.\n" + "Left images received: %d (topic '%s')\n" + "Right images received: %d (topic '%s')\n" + "Disparity images received: %d (topic '%s')\n" + "Synchronized triplets: %d\n" + "Possible issues:\n" + "\t* stereo_image_proc is not running.\n" + "\t Does `rosnode info %s` show any connections?\n" + "\t* The cameras are not synchronized.\n" + "\t Try restarting stereo_view with parameter _approximate_sync:=True\n" + "\t* The network is too slow. One or more images are dropped from each triplet.\n" + "\t Try restarting stereo_view, increasing parameter 'queue_size' (currently %d)", + left_received_, left_sub_.getTopic().c_str(), + right_received_, right_sub_.getTopic().c_str(), + disp_received_, disparity_sub_.getTopic().c_str(), + all_received_, ros::this_node::getName().c_str(), queue_size_); + } + } +}; + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "stereo_view", ros::init_options::AnonymousName); + if (ros::names::remap("stereo") == "stereo") { + ROS_WARN("'stereo' has not been remapped! Example command-line usage:\n" + "\t$ rosrun image_view stereo_view stereo:=narrow_stereo image:=image_color"); + } + if (ros::names::remap("image") == "/image_raw") { + ROS_WARN("There is a delay between when the camera drivers publish the raw images and " + "when stereo_image_proc publishes the computed point cloud. stereo_view " + "may fail to synchronize these topics without a large queue_size."); + } + + std::string transport = argc > 1 ? argv[1] : "raw"; + StereoView view(transport); + + ros::spin(); + return 0; +} diff --git a/future pkg/image_pipeline/image_view/src/nodes/video_recorder.cpp b/future pkg/image_pipeline/image_view/src/nodes/video_recorder.cpp new file mode 100644 index 0000000..d1411c9 --- /dev/null +++ b/future pkg/image_pipeline/image_view/src/nodes/video_recorder.cpp @@ -0,0 +1,141 @@ +/**************************************************************************** +* Software License Agreement (Apache License) +* +* Copyright (C) 2012-2013 Open Source Robotics Foundation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+* +*****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#if CV_MAJOR_VERSION == 3 +#include +#endif + +cv::VideoWriter outputVideo; + +int g_count = 0; +ros::Time g_last_wrote_time = ros::Time(0); +std::string encoding; +std::string codec; +int fps; +std::string filename; +double min_depth_range; +double max_depth_range; +bool use_dynamic_range; +int colormap; + + +void callback(const sensor_msgs::ImageConstPtr& image_msg) +{ + if (!outputVideo.isOpened()) { + + cv::Size size(image_msg->width, image_msg->height); + + outputVideo.open(filename, +#if CV_MAJOR_VERSION == 3 + cv::VideoWriter::fourcc(codec.c_str()[0], +#else + cv::VideoWriter::fourcc(codec.c_str()[0], +#endif + codec.c_str()[1], + codec.c_str()[2], + codec.c_str()[3]), + fps, + size, + true); + + if (!outputVideo.isOpened()) + { + ROS_ERROR("Could not create the output video! Check filename and/or support for codec."); + exit(-1); + } + + ROS_INFO_STREAM("Starting to record " << codec << " video at " << size << "@" << fps << "fps. Press Ctrl+C to stop recording." ); + + } + + if ((image_msg->header.stamp - g_last_wrote_time) < ros::Duration(1 / fps)) + { + // Skip to get video with correct fps + return; + } + + try + { + cv_bridge::CvtColorForDisplayOptions options; + options.do_dynamic_scaling = use_dynamic_range; + options.min_image_value = min_depth_range; + options.max_image_value = max_depth_range; + options.colormap = colormap; + const cv::Mat image = cv_bridge::cvtColorForDisplay(cv_bridge::toCvShare(image_msg), encoding, options)->image; + if (!image.empty()) { + outputVideo << image; + ROS_INFO_STREAM("Recording frame " << g_count << "\x1b[1F"); + g_count++; + g_last_wrote_time = image_msg->header.stamp; + } else { + ROS_WARN("Frame skipped, no data!"); + } + } catch(cv_bridge::Exception) + { + ROS_ERROR("Unable to convert %s image to %s", image_msg->encoding.c_str(), encoding.c_str()); + return; + } +} + +int main(int argc, char** argv) +{ + ros::init(argc, argv, "video_recorder", ros::init_options::AnonymousName); + ros::NodeHandle nh; + ros::NodeHandle local_nh("~"); + local_nh.param("filename", filename, std::string("output.avi")); + bool stamped_filename; + local_nh.param("stamped_filename", stamped_filename, false); + local_nh.param("fps", fps, 15); + local_nh.param("codec", codec, std::string("MJPG")); + local_nh.param("encoding", encoding, std::string("bgr8")); + // cv_bridge::CvtColorForDisplayOptions + local_nh.param("min_depth_range", min_depth_range, 0.0); + local_nh.param("max_depth_range", max_depth_range, 0.0); + local_nh.param("use_dynamic_depth_range", use_dynamic_range, false); + local_nh.param("colormap", colormap, -1); + + if (stamped_filename) { + std::size_t found = filename.find_last_of("/\\"); + std::string path = filename.substr(0, found + 1); + std::string basename = filename.substr(found + 1); + std::stringstream ss; + ss << ros::Time::now().toNSec() << basename; + filename = path + ss.str(); + ROS_INFO("Video recording to %s", filename.c_str()); + } + + if (codec.size() != 4) { + ROS_ERROR("The video codec must be a FOURCC identifier (4 chars)"); + exit(-1); + } + + image_transport::ImageTransport it(nh); + std::string topic = nh.resolveName("image"); + image_transport::Subscriber sub_image = it.subscribe(topic, 1, callback); + + ROS_INFO_STREAM("Waiting for topic " << topic << "..."); + ros::spin(); + std::cout << "\nVideo saved as " << filename << std::endl; +} diff --git a/future 
pkg/image_pipeline/stereo_image_proc/.tar b/future pkg/image_pipeline/stereo_image_proc/.tar new file mode 100644 index 0000000..8e0ec5e --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/.tar @@ -0,0 +1,2 @@ +{!!python/unicode 'url': 'https://github.com/ros-gbp/image_pipeline-release/archive/release/kinetic/stereo_image_proc/1.12.23-0.tar.gz', + !!python/unicode 'version': image_pipeline-release-release-kinetic-stereo_image_proc-1.12.23-0} diff --git a/future pkg/image_pipeline/stereo_image_proc/CHANGELOG.rst b/future pkg/image_pipeline/stereo_image_proc/CHANGELOG.rst new file mode 100644 index 0000000..40ab517 --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/CHANGELOG.rst @@ -0,0 +1,102 @@ +1.12.23 (2018-05-10) +-------------------- +* Removed unused mutable scratch buffers (`#315 `_) + The uint32_t buffers conflicted with newer release of OpenCV3, as explained here https://github.com/ros-perception/image_pipeline/issues/310 +* Contributors: Miquel Massot + +1.12.22 (2017-12-08) +-------------------- + +1.12.21 (2017-11-05) +-------------------- +* Updated fix for traits change. (`#303 `_) +* Fix C++11 compilation + This fixes `#292 `_ and `#291 `_ +* Contributors: Mike Purvis, Vincent Rabaud + +1.12.20 (2017-04-30) +-------------------- +* fix doc jobs + This is a proper fix for `#233 `_ +* address gcc6 build error + With gcc6, compiling fails with `stdlib.h: No such file or directory`, + as including '-isystem /usr/include' breaks with gcc6, cf., + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129. + This commit addresses this issue for this package in the same way + it was addressed in various other ROS packages. A list of related + commits and pull requests is at: + https://github.com/ros/rosdistro/issues/12783 + Signed-off-by: Lukas Bulwahn +* Contributors: Lukas Bulwahn, Vincent Rabaud + +1.12.19 (2016-07-24) +-------------------- + +1.12.18 (2016-07-12) +-------------------- + +1.12.17 (2016-07-11) +-------------------- + +1.12.16 (2016-03-19) +-------------------- +* clean OpenCV dependency in package.xml +* Contributors: Vincent Rabaud + +1.12.15 (2016-01-17) +-------------------- +* simplify OpenCV3 conversion +* Contributors: Vincent Rabaud + +1.12.14 (2015-07-22) +-------------------- +* add StereoSGBM and it can be chosen from dynamic_reconfigure +* Contributors: Ryohei Ueda + +1.12.13 (2015-04-06) +-------------------- +* get code to compile with OpenCV3 +* modify pointcloud data format of stereo_image_proc using point_cloud2_iterator +* Contributors: Hiroaki Yaguchi, Vincent Rabaud + +1.12.12 (2014-12-31) +-------------------- + +1.12.11 (2014-10-26) +-------------------- + +1.12.10 (2014-09-28) +-------------------- + +1.12.9 (2014-09-21) +------------------- +* get code to compile with OpenCV3 + fixes `#96 `_ +* Contributors: Vincent Rabaud + +1.12.8 (2014-08-19) +------------------- + +1.12.6 (2014-07-27) +------------------- + +1.12.4 (2014-04-28) +------------------- + +1.12.3 (2014-04-12) +------------------- + +1.12.2 (2014-04-08) +------------------- + +1.12.0 (2014-04-04) +------------------- +* remove PointCloud1 nodelets + +1.11.5 (2013-12-07 13:42:55 +0100) +---------------------------------- +- fix compilation on OSX (#50) + +1.11.4 (2013-11-23 13:10:55 +0100) +---------------------------------- +- convert images to MONO8 when computing disparity if needed (#49) diff --git a/future pkg/image_pipeline/stereo_image_proc/CMakeLists.txt b/future pkg/image_pipeline/stereo_image_proc/CMakeLists.txt new file mode 100644 index 
0000000..aed4a6d
--- /dev/null
+++ b/future pkg/image_pipeline/stereo_image_proc/CMakeLists.txt
@@ -0,0 +1,57 @@
+cmake_minimum_required(VERSION 2.8)
+project(stereo_image_proc)
+
+find_package(catkin REQUIRED cv_bridge dynamic_reconfigure image_geometry image_proc image_transport message_filters nodelet sensor_msgs stereo_msgs)
+find_package(Boost REQUIRED COMPONENTS thread)
+
+if(cv_bridge_VERSION VERSION_GREATER "1.12.0")
+  add_compile_options(-std=c++11)
+endif()
+
+# Dynamic reconfigure support
+generate_dynamic_reconfigure_options(cfg/Disparity.cfg)
+
+catkin_package(
+  CATKIN_DEPENDS image_geometry image_proc sensor_msgs stereo_msgs
+  INCLUDE_DIRS include
+  LIBRARIES ${PROJECT_NAME}
+)
+
+include_directories(include)
+
+find_package(OpenCV REQUIRED)
+include_directories(${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS})
+
+# See note in image_proc/CMakeLists.txt
+add_definitions(-DOPENCV_TRAITS_ENABLE_DEPRECATED)
+
+# Nodelet library
+add_library(${PROJECT_NAME} src/libstereo_image_proc/processor.cpp src/nodelets/disparity.cpp src/nodelets/point_cloud2.cpp)
+target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES}
+  ${OpenCV_LIBRARIES}
+)
+add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg)
+install(TARGETS ${PROJECT_NAME}
+  DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+)
+install(FILES nodelet_plugins.xml
+  DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+)
+
+# Standalone node
+add_executable(stereoimageproc_exe src/nodes/stereo_image_proc.cpp)
+target_link_libraries(stereoimageproc_exe stereo_image_proc)
+SET_TARGET_PROPERTIES(stereoimageproc_exe PROPERTIES OUTPUT_NAME stereo_image_proc)
+install(TARGETS stereoimageproc_exe
+  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+)
+
+# install the launch file
+install(DIRECTORY launch
+  DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/
+)
+
+# install the include directory
+install(DIRECTORY include/${PROJECT_NAME}/
+  DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+)
diff --git a/future pkg/image_pipeline/stereo_image_proc/cfg/Disparity.cfg b/future pkg/image_pipeline/stereo_image_proc/cfg/Disparity.cfg
new file mode 100755
index 0000000..4c502e7
--- /dev/null
+++ b/future pkg/image_pipeline/stereo_image_proc/cfg/Disparity.cfg
@@ -0,0 +1,39 @@
+#! /usr/bin/env python
+
+# Declare parameters that control stereo processing
+
+PACKAGE='stereo_image_proc'
+
+from dynamic_reconfigure.parameter_generator_catkin import *
+
+gen = ParameterGenerator()
+
+stereo_algo_enum = gen.enum([gen.const("StereoBM", int_t, 0, "Block Matching"),
+                             gen.const("StereoSGBM", int_t, 1, "SemiGlobal Block Matching")],
+                            "stereo algorithm")
+gen.add("stereo_algorithm", int_t, 0, "stereo algorithm", 0, 0, 1,
+        edit_method = stereo_algo_enum)
+# disparity block matching pre-filtering parameters
+gen.add("prefilter_size", int_t, 0, "Normalization window size, pixels", 9, 5, 255)
+gen.add("prefilter_cap", int_t, 0, "Bound on normalized pixel values", 31, 1, 63)
+
+# disparity block matching correlation parameters
+gen.add("correlation_window_size", int_t, 0, "SAD correlation window width, pixels", 15, 5, 255)
+gen.add("min_disparity", int_t, 0, "Disparity to begin search at, pixels (may be negative)", 0, -128, 128)
+gen.add("disparity_range", int_t, 0, "Number of disparities to search, pixels", 64, 32, 256)
+# TODO What about trySmallerWindows?
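+# NOTE: the disparity nodelet further sanitizes these values at runtime (see
+# src/nodelets/disparity.cpp): prefilter_size and correlation_window_size are
+# forced to odd values and disparity_range is rounded down to a multiple of 16.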
+
+# disparity block matching post-filtering parameters
+# NOTE: Making uniqueness_ratio int_t instead of double_t to work around dynamic_reconfigure gui issue
+gen.add("uniqueness_ratio", double_t, 0, "Filter out if best match does not sufficiently exceed the next-best match", 15, 0, 100)
+gen.add("texture_threshold", int_t, 0, "Filter out if SAD window response does not exceed texture threshold", 10, 0, 10000)
+gen.add("speckle_size", int_t, 0, "Reject regions smaller than this size, pixels", 100, 0, 1000)
+gen.add("speckle_range", int_t, 0, "Max allowed difference between detected disparities", 4, 0, 31)
+gen.add("fullDP", bool_t, 0, "Run the full variant of the algorithm, only available in SGBM", False)
+gen.add("P1", double_t, 0, "The first parameter controlling the disparity smoothness, only available in SGBM", 200, 0, 4000)
+gen.add("P2", double_t, 0, "The second parameter controlling the disparity smoothness, only available in SGBM", 400, 0, 4000)
+gen.add("disp12MaxDiff", int_t, 0, "Maximum allowed difference (in integer pixel units) in the left-right disparity check, only available in SGBM", 0, 0, 128)
+# First string value is node name, used only for generating documentation
+# Second string value ("Disparity") is name of class and generated
+# .h file, with "Config" added, so class DisparityConfig
+exit(gen.generate(PACKAGE, "stereo_image_proc", "Disparity"))
diff --git a/future pkg/image_pipeline/stereo_image_proc/doc/mainpage.dox b/future pkg/image_pipeline/stereo_image_proc/doc/mainpage.dox
new file mode 100644
index 0000000..39b89a1
--- /dev/null
+++ b/future pkg/image_pipeline/stereo_image_proc/doc/mainpage.dox
@@ -0,0 +1,14 @@
+/**
+
+@mainpage
+
+@htmlinclude manifest.html
+
+@b stereo_image_proc contains a node for performing rectification and
+color processing on the raw images produced by a pair of stereo cameras.
+It also produces 3D stereo outputs: the disparity image and point cloud.
+See http://www.ros.org/wiki/stereo_image_proc for documentation.
+
+Currently this package has no public code API.
+
+*/
diff --git a/future pkg/image_pipeline/stereo_image_proc/doc/stereo_frames.svg b/future pkg/image_pipeline/stereo_image_proc/doc/stereo_frames.svg
new file mode 100644
index 0000000..bac9f92
--- /dev/null
+++ b/future pkg/image_pipeline/stereo_image_proc/doc/stereo_frames.svg
@@ -0,0 +1,1052 @@
+[SVG figure: diagram of the stereo coordinate frames, labelling the X, Y, Z axes of the LeftImager and RightImager and the resulting Point Cloud; the vector markup itself did not survive extraction.]
diff --git a/future pkg/image_pipeline/stereo_image_proc/include/stereo_image_proc/processor.h b/future pkg/image_pipeline/stereo_image_proc/include/stereo_image_proc/processor.h
new file mode 100644
index 0000000..22abdd4
--- /dev/null
+++ b/future pkg/image_pipeline/stereo_image_proc/include/stereo_image_proc/processor.h
@@ -0,0 +1,299 @@
+/*********************************************************************
+* Software License Agreement (BSD License)
+*
+* Copyright (c) 2008, Willow Garage, Inc.
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#ifndef STEREO_IMAGE_PROC_PROCESSOR_H +#define STEREO_IMAGE_PROC_PROCESSOR_H + +#include +#include +#include +#include +#include + +namespace stereo_image_proc { + +struct StereoImageSet +{ + image_proc::ImageSet left; + image_proc::ImageSet right; + stereo_msgs::DisparityImage disparity; + sensor_msgs::PointCloud points; + sensor_msgs::PointCloud2 points2; +}; + +class StereoProcessor +{ +public: + + StereoProcessor() +#if CV_MAJOR_VERSION == 3 + { + block_matcher_ = cv::StereoBM::create(); + sg_block_matcher_ = cv::StereoSGBM::create(1, 1, 10); +#else + : block_matcher_(cv::StereoBM::BASIC_PRESET), + sg_block_matcher_() + { +#endif + } + + enum StereoType + { + BM, SGBM + }; + + enum { + LEFT_MONO = 1 << 0, + LEFT_RECT = 1 << 1, + LEFT_COLOR = 1 << 2, + LEFT_RECT_COLOR = 1 << 3, + RIGHT_MONO = 1 << 4, + RIGHT_RECT = 1 << 5, + RIGHT_COLOR = 1 << 6, + RIGHT_RECT_COLOR = 1 << 7, + DISPARITY = 1 << 8, + POINT_CLOUD = 1 << 9, + POINT_CLOUD2 = 1 << 10, + + LEFT_ALL = LEFT_MONO | LEFT_RECT | LEFT_COLOR | LEFT_RECT_COLOR, + RIGHT_ALL = RIGHT_MONO | RIGHT_RECT | RIGHT_COLOR | RIGHT_RECT_COLOR, + STEREO_ALL = DISPARITY | POINT_CLOUD | POINT_CLOUD2, + ALL = LEFT_ALL | RIGHT_ALL | STEREO_ALL + }; + + inline + StereoType getStereoType() const {return current_stereo_algorithm_;} + inline + void setStereoType(StereoType type) {current_stereo_algorithm_ = type;} + + int getInterpolation() const; + void setInterpolation(int interp); + + // Disparity pre-filtering parameters + + int getPreFilterSize() const; + void setPreFilterSize(int size); + + int getPreFilterCap() const; + void setPreFilterCap(int cap); + + // Disparity correlation parameters + + int getCorrelationWindowSize() const; + void setCorrelationWindowSize(int size); + + int getMinDisparity() const; + void setMinDisparity(int min_d); + + int getDisparityRange() const; + void setDisparityRange(int range); // Number of pixels to search + + // Disparity post-filtering parameters + + int getTextureThreshold() const; + void setTextureThreshold(int threshold); + + float getUniquenessRatio() const; + void setUniquenessRatio(float ratio); + + int getSpeckleSize() const; + void setSpeckleSize(int size); + + int getSpeckleRange() const; + void setSpeckleRange(int range); + + // SGBM only + int 
getSgbmMode() const; + void setSgbmMode(int fullDP); + + int getP1() const; + void setP1(int P1); + + int getP2() const; + void setP2(int P2); + + int getDisp12MaxDiff() const; + void setDisp12MaxDiff(int disp12MaxDiff); + + // Do all the work! + bool process(const sensor_msgs::ImageConstPtr& left_raw, + const sensor_msgs::ImageConstPtr& right_raw, + const image_geometry::StereoCameraModel& model, + StereoImageSet& output, int flags) const; + + void processDisparity(const cv::Mat& left_rect, const cv::Mat& right_rect, + const image_geometry::StereoCameraModel& model, + stereo_msgs::DisparityImage& disparity) const; + + void processPoints(const stereo_msgs::DisparityImage& disparity, + const cv::Mat& color, const std::string& encoding, + const image_geometry::StereoCameraModel& model, + sensor_msgs::PointCloud& points) const; + void processPoints2(const stereo_msgs::DisparityImage& disparity, + const cv::Mat& color, const std::string& encoding, + const image_geometry::StereoCameraModel& model, + sensor_msgs::PointCloud2& points) const; + +private: + image_proc::Processor mono_processor_; + + mutable cv::Mat_ disparity16_; // scratch buffer for 16-bit signed disparity image +#if CV_MAJOR_VERSION == 3 + mutable cv::Ptr block_matcher_; // contains scratch buffers for block matching + mutable cv::Ptr sg_block_matcher_; +#else + mutable cv::StereoBM block_matcher_; // contains scratch buffers for block matching + mutable cv::StereoSGBM sg_block_matcher_; +#endif + StereoType current_stereo_algorithm_; + // scratch buffer for dense point cloud + mutable cv::Mat_ dense_points_; +}; + + +inline int StereoProcessor::getInterpolation() const +{ + return mono_processor_.interpolation_; +} + +inline void StereoProcessor::setInterpolation(int interp) +{ + mono_processor_.interpolation_ = interp; +} + +// For once, a macro is used just to avoid errors +#define STEREO_IMAGE_PROC_OPENCV2(GET, SET, TYPE, PARAM) \ +inline TYPE StereoProcessor::GET() const \ +{ \ + if (current_stereo_algorithm_ == BM) \ + return block_matcher_.state->PARAM; \ + return sg_block_matcher_.PARAM; \ +} \ + \ +inline void StereoProcessor::SET(TYPE param) \ +{ \ + block_matcher_.state->PARAM = param; \ + sg_block_matcher_.PARAM = param; \ +} + +#define STEREO_IMAGE_PROC_OPENCV3(GET, SET, TYPE, GET_OPENCV, SET_OPENCV) \ +inline TYPE StereoProcessor::GET() const \ +{ \ + if (current_stereo_algorithm_ == BM) \ + return block_matcher_->GET_OPENCV(); \ + return sg_block_matcher_->GET_OPENCV(); \ +} \ +\ +inline void StereoProcessor::SET(TYPE param) \ +{ \ + block_matcher_->SET_OPENCV(param); \ + sg_block_matcher_->SET_OPENCV(param); \ +} + +#if CV_MAJOR_VERSION == 3 +STEREO_IMAGE_PROC_OPENCV3(getPreFilterCap, setPreFilterCap, int, getPreFilterCap, setPreFilterCap) +STEREO_IMAGE_PROC_OPENCV3(getCorrelationWindowSize, setCorrelationWindowSize, int, getBlockSize, setBlockSize) +STEREO_IMAGE_PROC_OPENCV3(getMinDisparity, setMinDisparity, int, getMinDisparity, setMinDisparity) +STEREO_IMAGE_PROC_OPENCV3(getDisparityRange, setDisparityRange, int, getNumDisparities, setNumDisparities) +STEREO_IMAGE_PROC_OPENCV3(getUniquenessRatio, setUniquenessRatio, float, getUniquenessRatio, setUniquenessRatio) +STEREO_IMAGE_PROC_OPENCV3(getSpeckleSize, setSpeckleSize, int, getSpeckleWindowSize, setSpeckleWindowSize) +STEREO_IMAGE_PROC_OPENCV3(getSpeckleRange, setSpeckleRange, int, getSpeckleRange, setSpeckleRange) +#else +STEREO_IMAGE_PROC_OPENCV2(getPreFilterCap, setPreFilterCap, int, preFilterCap) +STEREO_IMAGE_PROC_OPENCV2(getCorrelationWindowSize, 
setCorrelationWindowSize, int, SADWindowSize) +STEREO_IMAGE_PROC_OPENCV2(getMinDisparity, setMinDisparity, int, minDisparity) +STEREO_IMAGE_PROC_OPENCV2(getDisparityRange, setDisparityRange, int, numberOfDisparities) +STEREO_IMAGE_PROC_OPENCV2(getUniquenessRatio, setUniquenessRatio, float, uniquenessRatio) +STEREO_IMAGE_PROC_OPENCV2(getSpeckleSize, setSpeckleSize, int, speckleWindowSize) +STEREO_IMAGE_PROC_OPENCV2(getSpeckleRange, setSpeckleRange, int, speckleRange) +#endif + +#define STEREO_IMAGE_PROC_BM_ONLY_OPENCV2(GET, SET, TYPE, PARAM) \ +inline TYPE StereoProcessor::GET() const \ +{ \ + return block_matcher_.state->PARAM; \ +} \ +\ +inline void StereoProcessor::SET(TYPE param) \ +{ \ + block_matcher_.state->PARAM = param; \ +} + +#define STEREO_IMAGE_PROC_SGBM_ONLY_OPENCV2(GET, SET, TYPE, PARAM) \ +inline TYPE StereoProcessor::GET() const \ +{ \ + return sg_block_matcher_.PARAM; \ +} \ +\ +inline void StereoProcessor::SET(TYPE param) \ +{ \ + sg_block_matcher_.PARAM = param; \ +} + +#define STEREO_IMAGE_PROC_ONLY_OPENCV3(MEMBER, GET, SET, TYPE, GET_OPENCV, SET_OPENCV) \ +inline TYPE StereoProcessor::GET() const \ +{ \ + return MEMBER->GET_OPENCV(); \ +} \ +\ +inline void StereoProcessor::SET(TYPE param) \ +{ \ + MEMBER->SET_OPENCV(param); \ +} + +// BM only +#if CV_MAJOR_VERSION == 3 +STEREO_IMAGE_PROC_ONLY_OPENCV3(block_matcher_, getPreFilterSize, setPreFilterSize, int, getPreFilterSize, setPreFilterSize) +STEREO_IMAGE_PROC_ONLY_OPENCV3(block_matcher_, getTextureThreshold, setTextureThreshold, int, getTextureThreshold, setTextureThreshold) +#else +STEREO_IMAGE_PROC_BM_ONLY_OPENCV2(getPreFilterSize, setPreFilterSize, int, preFilterSize) +STEREO_IMAGE_PROC_BM_ONLY_OPENCV2(getTextureThreshold, setTextureThreshold, int, textureThreshold) +#endif + +// SGBM specific +#if CV_MAJOR_VERSION == 3 +// getSgbmMode can return MODE_SGBM = 0, MODE_HH = 1. 
FullDP == 1 was MODE_HH so we're good +STEREO_IMAGE_PROC_ONLY_OPENCV3(sg_block_matcher_, getSgbmMode, setSgbmMode, int, getMode, setMode) +STEREO_IMAGE_PROC_ONLY_OPENCV3(sg_block_matcher_, getP1, setP1, int, getP1, setP1) +STEREO_IMAGE_PROC_ONLY_OPENCV3(sg_block_matcher_, getP2, setP2, int, getP2, setP2) +STEREO_IMAGE_PROC_ONLY_OPENCV3(sg_block_matcher_, getDisp12MaxDiff, setDisp12MaxDiff, int, getDisp12MaxDiff, setDisp12MaxDiff) +#else +STEREO_IMAGE_PROC_SGBM_ONLY_OPENCV2(getSgbmMode, setSgbmMode, int, fullDP) +STEREO_IMAGE_PROC_SGBM_ONLY_OPENCV2(getP1, setP1, int, P1) +STEREO_IMAGE_PROC_SGBM_ONLY_OPENCV2(getP2, setP2, int, P2) +STEREO_IMAGE_PROC_SGBM_ONLY_OPENCV2(getDisp12MaxDiff, setDisp12MaxDiff, int, disp12MaxDiff) +#endif + +} //namespace stereo_image_proc + +#endif diff --git a/future pkg/image_pipeline/stereo_image_proc/launch/stereo_image_proc.launch b/future pkg/image_pipeline/stereo_image_proc/launch/stereo_image_proc.launch new file mode 100644 index 0000000..605efd7 --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/launch/stereo_image_proc.launch @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/future pkg/image_pipeline/stereo_image_proc/nodelet_plugins.xml b/future pkg/image_pipeline/stereo_image_proc/nodelet_plugins.xml new file mode 100644 index 0000000..3512efc --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/nodelet_plugins.xml @@ -0,0 +1,11 @@ + + + + Nodelet to perform stereo processing on a pair of rectified image streams, producing disparity images + + + + Nodelet to produce XYZRGB PointCloud2 messages + + + diff --git a/future pkg/image_pipeline/stereo_image_proc/package.xml b/future pkg/image_pipeline/stereo_image_proc/package.xml new file mode 100644 index 0000000..ec853e7 --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/package.xml @@ -0,0 +1,40 @@ + + stereo_image_proc + 1.12.23 + Stereo and single image rectification and disparity processing. + Patrick Mihelich + Kurt Konolige + Jeremy Leibs + Vincent Rabaud + BSD + http://www.ros.org/wiki/stereo_image_proc + + + + + + + catkin + + rostest + + cv_bridge + dynamic_reconfigure + image_geometry + image_proc + image_transport + message_filters + nodelet + sensor_msgs + stereo_msgs + + cv_bridge + dynamic_reconfigure + image_geometry + image_proc + image_transport + message_filters + nodelet + sensor_msgs + stereo_msgs + diff --git a/future pkg/image_pipeline/stereo_image_proc/rosdoc.yaml b/future pkg/image_pipeline/stereo_image_proc/rosdoc.yaml new file mode 100644 index 0000000..976fdf0 --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/rosdoc.yaml @@ -0,0 +1,4 @@ + - builder: doxygen + name: C++ API + output_dir: c++ + file_patterns: '*.c *.cpp *.h *.cc *.hh *.dox' diff --git a/future pkg/image_pipeline/stereo_image_proc/src/libstereo_image_proc/processor.cpp b/future pkg/image_pipeline/stereo_image_proc/src/libstereo_image_proc/processor.cpp new file mode 100644 index 0000000..cecbf97 --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/src/libstereo_image_proc/processor.cpp @@ -0,0 +1,317 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. 
+* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#include "stereo_image_proc/processor.h" +#include +#include +#include + +namespace stereo_image_proc { + +bool StereoProcessor::process(const sensor_msgs::ImageConstPtr& left_raw, + const sensor_msgs::ImageConstPtr& right_raw, + const image_geometry::StereoCameraModel& model, + StereoImageSet& output, int flags) const +{ + // Do monocular processing on left and right images + int left_flags = flags & LEFT_ALL; + int right_flags = flags & RIGHT_ALL; + if (flags & STEREO_ALL) { + // Need the rectified images for stereo processing + left_flags |= LEFT_RECT; + right_flags |= RIGHT_RECT; + } + if (flags & (POINT_CLOUD | POINT_CLOUD2)) { + flags |= DISPARITY; + // Need the color channels for the point cloud + left_flags |= LEFT_RECT_COLOR; + } + if (!mono_processor_.process(left_raw, model.left(), output.left, left_flags)) + return false; + if (!mono_processor_.process(right_raw, model.right(), output.right, right_flags >> 4)) + return false; + + // Do block matching to produce the disparity image + if (flags & DISPARITY) { + processDisparity(output.left.rect, output.right.rect, model, output.disparity); + } + + // Project disparity image to 3d point cloud + if (flags & POINT_CLOUD) { + processPoints(output.disparity, output.left.rect_color, output.left.color_encoding, model, output.points); + } + + // Project disparity image to 3d point cloud + if (flags & POINT_CLOUD2) { + processPoints2(output.disparity, output.left.rect_color, output.left.color_encoding, model, output.points2); + } + + return true; +} + +void StereoProcessor::processDisparity(const cv::Mat& left_rect, const cv::Mat& right_rect, + const image_geometry::StereoCameraModel& model, + stereo_msgs::DisparityImage& disparity) const +{ + // Fixed-point disparity is 16 times the true value: d = d_fp / 16.0 = x_l - x_r. 
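+  // Worked example of the fixed-point convention: a raw matcher output of 160
+  // encodes a true disparity of 160 / 16 = 10.0 pixels, so the finest disparity
+  // step this representation can express (reported below as delta_d) is 1/16 pixel.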
+ static const int DPP = 16; // disparities per pixel + static const double inv_dpp = 1.0 / DPP; + + // Block matcher produces 16-bit signed (fixed point) disparity image + if (current_stereo_algorithm_ == BM) +#if CV_MAJOR_VERSION == 3 + block_matcher_->compute(left_rect, right_rect, disparity16_); + else + sg_block_matcher_->compute(left_rect, right_rect, disparity16_); +#else + block_matcher_(left_rect, right_rect, disparity16_); + else + sg_block_matcher_(left_rect, right_rect, disparity16_); +#endif + + // Fill in DisparityImage image data, converting to 32-bit float + sensor_msgs::Image& dimage = disparity.image; + dimage.height = disparity16_.rows; + dimage.width = disparity16_.cols; + dimage.encoding = sensor_msgs::image_encodings::TYPE_32FC1; + dimage.step = dimage.width * sizeof(float); + dimage.data.resize(dimage.step * dimage.height); + cv::Mat_ dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step); + // We convert from fixed-point to float disparity and also adjust for any x-offset between + // the principal points: d = d_fp*inv_dpp - (cx_l - cx_r) + disparity16_.convertTo(dmat, dmat.type(), inv_dpp, -(model.left().cx() - model.right().cx())); + ROS_ASSERT(dmat.data == &dimage.data[0]); + /// @todo is_bigendian? :) + + // Stereo parameters + disparity.f = model.right().fx(); + disparity.T = model.baseline(); + + /// @todo Window of (potentially) valid disparities + + // Disparity search range + disparity.min_disparity = getMinDisparity(); + disparity.max_disparity = getMinDisparity() + getDisparityRange() - 1; + disparity.delta_d = inv_dpp; +} + +inline bool isValidPoint(const cv::Vec3f& pt) +{ + // Check both for disparities explicitly marked as invalid (where OpenCV maps pt.z to MISSING_Z) + // and zero disparities (point mapped to infinity). 
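+  // (projectDisparityImageTo3d() is invoked with handleMissingValues=true, so
+  // pixels whose disparity could not be computed are given the MISSING_Z
+  // sentinel depth, while a measured disparity of zero projects to infinity.)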
+ return pt[2] != image_geometry::StereoCameraModel::MISSING_Z && !std::isinf(pt[2]); +} + +void StereoProcessor::processPoints(const stereo_msgs::DisparityImage& disparity, + const cv::Mat& color, const std::string& encoding, + const image_geometry::StereoCameraModel& model, + sensor_msgs::PointCloud& points) const +{ + // Calculate dense point cloud + const sensor_msgs::Image& dimage = disparity.image; + const cv::Mat_ dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step); + model.projectDisparityImageTo3d(dmat, dense_points_, true); + + // Fill in sparse point cloud message + points.points.resize(0); + points.channels.resize(3); + points.channels[0].name = "rgb"; + points.channels[0].values.resize(0); + points.channels[1].name = "u"; + points.channels[1].values.resize(0); + points.channels[2].name = "v"; + points.channels[2].values.resize(0); + + for (int32_t u = 0; u < dense_points_.rows; ++u) { + for (int32_t v = 0; v < dense_points_.cols; ++v) { + if (isValidPoint(dense_points_(u,v))) { + // x,y,z + geometry_msgs::Point32 pt; + pt.x = dense_points_(u,v)[0]; + pt.y = dense_points_(u,v)[1]; + pt.z = dense_points_(u,v)[2]; + points.points.push_back(pt); + // u,v + points.channels[1].values.push_back(u); + points.channels[2].values.push_back(v); + } + } + } + + // Fill in color + namespace enc = sensor_msgs::image_encodings; + points.channels[0].values.reserve(points.points.size()); + if (encoding == enc::MONO8) { + for (int32_t u = 0; u < dense_points_.rows; ++u) { + for (int32_t v = 0; v < dense_points_.cols; ++v) { + if (isValidPoint(dense_points_(u,v))) { + uint8_t g = color.at(u,v); + int32_t rgb = (g << 16) | (g << 8) | g; + points.channels[0].values.push_back(*(float*)(&rgb)); + } + } + } + } + else if (encoding == enc::RGB8) { + for (int32_t u = 0; u < dense_points_.rows; ++u) { + for (int32_t v = 0; v < dense_points_.cols; ++v) { + if (isValidPoint(dense_points_(u,v))) { + const cv::Vec3b& rgb = color.at(u,v); + int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2]; + points.channels[0].values.push_back(*(float*)(&rgb_packed)); + } + } + } + } + else if (encoding == enc::BGR8) { + for (int32_t u = 0; u < dense_points_.rows; ++u) { + for (int32_t v = 0; v < dense_points_.cols; ++v) { + if (isValidPoint(dense_points_(u,v))) { + const cv::Vec3b& bgr = color.at(u,v); + int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0]; + points.channels[0].values.push_back(*(float*)(&rgb_packed)); + } + } + } + } + else { + ROS_WARN("Could not fill color channel of the point cloud, unrecognized encoding '%s'", encoding.c_str()); + } +} + +void StereoProcessor::processPoints2(const stereo_msgs::DisparityImage& disparity, + const cv::Mat& color, const std::string& encoding, + const image_geometry::StereoCameraModel& model, + sensor_msgs::PointCloud2& points) const +{ + // Calculate dense point cloud + const sensor_msgs::Image& dimage = disparity.image; + const cv::Mat_ dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step); + model.projectDisparityImageTo3d(dmat, dense_points_, true); + + // Fill in sparse point cloud message + points.height = dense_points_.rows; + points.width = dense_points_.cols; + points.fields.resize (4); + points.fields[0].name = "x"; + points.fields[0].offset = 0; + points.fields[0].count = 1; + points.fields[0].datatype = sensor_msgs::PointField::FLOAT32; + points.fields[1].name = "y"; + points.fields[1].offset = 4; + points.fields[1].count = 1; + points.fields[1].datatype = sensor_msgs::PointField::FLOAT32; + 
points.fields[2].name = "z"; + points.fields[2].offset = 8; + points.fields[2].count = 1; + points.fields[2].datatype = sensor_msgs::PointField::FLOAT32; + points.fields[3].name = "rgb"; + points.fields[3].offset = 12; + points.fields[3].count = 1; + points.fields[3].datatype = sensor_msgs::PointField::FLOAT32; + //points.is_bigendian = false; ??? + points.point_step = 16; + points.row_step = points.point_step * points.width; + points.data.resize (points.row_step * points.height); + points.is_dense = false; // there may be invalid points + + float bad_point = std::numeric_limits::quiet_NaN (); + int i = 0; + for (int32_t u = 0; u < dense_points_.rows; ++u) { + for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) { + if (isValidPoint(dense_points_(u,v))) { + // x,y,z,rgba + memcpy (&points.data[i * points.point_step + 0], &dense_points_(u,v)[0], sizeof (float)); + memcpy (&points.data[i * points.point_step + 4], &dense_points_(u,v)[1], sizeof (float)); + memcpy (&points.data[i * points.point_step + 8], &dense_points_(u,v)[2], sizeof (float)); + } + else { + memcpy (&points.data[i * points.point_step + 0], &bad_point, sizeof (float)); + memcpy (&points.data[i * points.point_step + 4], &bad_point, sizeof (float)); + memcpy (&points.data[i * points.point_step + 8], &bad_point, sizeof (float)); + } + } + } + + // Fill in color + namespace enc = sensor_msgs::image_encodings; + i = 0; + if (encoding == enc::MONO8) { + for (int32_t u = 0; u < dense_points_.rows; ++u) { + for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) { + if (isValidPoint(dense_points_(u,v))) { + uint8_t g = color.at(u,v); + int32_t rgb = (g << 16) | (g << 8) | g; + memcpy (&points.data[i * points.point_step + 12], &rgb, sizeof (int32_t)); + } + else { + memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float)); + } + } + } + } + else if (encoding == enc::RGB8) { + for (int32_t u = 0; u < dense_points_.rows; ++u) { + for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) { + if (isValidPoint(dense_points_(u,v))) { + const cv::Vec3b& rgb = color.at(u,v); + int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2]; + memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t)); + } + else { + memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float)); + } + } + } + } + else if (encoding == enc::BGR8) { + for (int32_t u = 0; u < dense_points_.rows; ++u) { + for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) { + if (isValidPoint(dense_points_(u,v))) { + const cv::Vec3b& bgr = color.at(u,v); + int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0]; + memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t)); + } + else { + memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float)); + } + } + } + } + else { + ROS_WARN("Could not fill color channel of the point cloud, unrecognized encoding '%s'", encoding.c_str()); + } +} + +} //namespace stereo_image_proc diff --git a/future pkg/image_pipeline/stereo_image_proc/src/nodelets/disparity.cpp b/future pkg/image_pipeline/stereo_image_proc/src/nodelets/disparity.cpp new file mode 100644 index 0000000..1c39f99 --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/src/nodelets/disparity.cpp @@ -0,0 +1,250 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. 
+* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#if ((BOOST_VERSION / 100) % 1000) >= 53 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include + +namespace stereo_image_proc { + +using namespace sensor_msgs; +using namespace stereo_msgs; +using namespace message_filters::sync_policies; + +class DisparityNodelet : public nodelet::Nodelet +{ + boost::shared_ptr it_; + + // Subscriptions + image_transport::SubscriberFilter sub_l_image_, sub_r_image_; + message_filters::Subscriber sub_l_info_, sub_r_info_; + typedef ExactTime ExactPolicy; + typedef ApproximateTime ApproximatePolicy; + typedef message_filters::Synchronizer ExactSync; + typedef message_filters::Synchronizer ApproximateSync; + boost::shared_ptr exact_sync_; + boost::shared_ptr approximate_sync_; + // Publications + boost::mutex connect_mutex_; + ros::Publisher pub_disparity_; + + // Dynamic reconfigure + boost::recursive_mutex config_mutex_; + typedef stereo_image_proc::DisparityConfig Config; + typedef dynamic_reconfigure::Server ReconfigureServer; + boost::shared_ptr reconfigure_server_; + + // Processing state (note: only safe because we're single-threaded!) + image_geometry::StereoCameraModel model_; + stereo_image_proc::StereoProcessor block_matcher_; // contains scratch buffers for block matching + + virtual void onInit(); + + void connectCb(); + + void imageCb(const ImageConstPtr& l_image_msg, const CameraInfoConstPtr& l_info_msg, + const ImageConstPtr& r_image_msg, const CameraInfoConstPtr& r_info_msg); + + void configCb(Config &config, uint32_t level); +}; + +void DisparityNodelet::onInit() +{ + ros::NodeHandle &nh = getNodeHandle(); + ros::NodeHandle &private_nh = getPrivateNodeHandle(); + + it_.reset(new image_transport::ImageTransport(nh)); + + // Synchronize inputs. Topic subscriptions happen on demand in the connection + // callback. Optionally do approximate synchronization. 
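+  // ExactTime only matches messages whose header stamps are identical, which
+  // suits hardware-triggered stereo pairs; ApproximateTime pairs the closest
+  // available stamps and is the right choice for free-running cameras.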
+ int queue_size; + private_nh.param("queue_size", queue_size, 5); + bool approx; + private_nh.param("approximate_sync", approx, false); + if (approx) + { + approximate_sync_.reset( new ApproximateSync(ApproximatePolicy(queue_size), + sub_l_image_, sub_l_info_, + sub_r_image_, sub_r_info_) ); + approximate_sync_->registerCallback(boost::bind(&DisparityNodelet::imageCb, + this, _1, _2, _3, _4)); + } + else + { + exact_sync_.reset( new ExactSync(ExactPolicy(queue_size), + sub_l_image_, sub_l_info_, + sub_r_image_, sub_r_info_) ); + exact_sync_->registerCallback(boost::bind(&DisparityNodelet::imageCb, + this, _1, _2, _3, _4)); + } + + // Set up dynamic reconfiguration + ReconfigureServer::CallbackType f = boost::bind(&DisparityNodelet::configCb, + this, _1, _2); + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, private_nh)); + reconfigure_server_->setCallback(f); + + // Monitor whether anyone is subscribed to the output + ros::SubscriberStatusCallback connect_cb = boost::bind(&DisparityNodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_disparity_ + boost::lock_guard lock(connect_mutex_); + pub_disparity_ = nh.advertise("disparity", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void DisparityNodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_disparity_.getNumSubscribers() == 0) + { + sub_l_image_.unsubscribe(); + sub_l_info_ .unsubscribe(); + sub_r_image_.unsubscribe(); + sub_r_info_ .unsubscribe(); + } + else if (!sub_l_image_.getSubscriber()) + { + ros::NodeHandle &nh = getNodeHandle(); + // Queue size 1 should be OK; the one that matters is the synchronizer queue size. + /// @todo Allow remapping left, right? + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_l_image_.subscribe(*it_, "left/image_rect", 1, hints); + sub_l_info_ .subscribe(nh, "left/camera_info", 1); + sub_r_image_.subscribe(*it_, "right/image_rect", 1, hints); + sub_r_info_ .subscribe(nh, "right/camera_info", 1); + } +} + +void DisparityNodelet::imageCb(const ImageConstPtr& l_image_msg, + const CameraInfoConstPtr& l_info_msg, + const ImageConstPtr& r_image_msg, + const CameraInfoConstPtr& r_info_msg) +{ + // Update the camera model + model_.fromCameraInfo(l_info_msg, r_info_msg); + + // Allocate new disparity image message + DisparityImagePtr disp_msg = boost::make_shared(); + disp_msg->header = l_info_msg->header; + disp_msg->image.header = l_info_msg->header; + + // Compute window of (potentially) valid disparities + int border = block_matcher_.getCorrelationWindowSize() / 2; + int left = block_matcher_.getDisparityRange() + block_matcher_.getMinDisparity() + border - 1; + int wtf = (block_matcher_.getMinDisparity() >= 0) ? 
border + block_matcher_.getMinDisparity() : std::max(border, -block_matcher_.getMinDisparity()); + int right = disp_msg->image.width - 1 - wtf; + int top = border; + int bottom = disp_msg->image.height - 1 - border; + disp_msg->valid_window.x_offset = left; + disp_msg->valid_window.y_offset = top; + disp_msg->valid_window.width = right - left; + disp_msg->valid_window.height = bottom - top; + + // Create cv::Mat views onto all buffers + const cv::Mat_ l_image = cv_bridge::toCvShare(l_image_msg, sensor_msgs::image_encodings::MONO8)->image; + const cv::Mat_ r_image = cv_bridge::toCvShare(r_image_msg, sensor_msgs::image_encodings::MONO8)->image; + + // Perform block matching to find the disparities + block_matcher_.processDisparity(l_image, r_image, model_, *disp_msg); + + // Adjust for any x-offset between the principal points: d' = d - (cx_l - cx_r) + double cx_l = model_.left().cx(); + double cx_r = model_.right().cx(); + if (cx_l != cx_r) { + cv::Mat_ disp_image(disp_msg->image.height, disp_msg->image.width, + reinterpret_cast(&disp_msg->image.data[0]), + disp_msg->image.step); + cv::subtract(disp_image, cv::Scalar(cx_l - cx_r), disp_image); + } + + pub_disparity_.publish(disp_msg); +} + +void DisparityNodelet::configCb(Config &config, uint32_t level) +{ + // Tweak all settings to be valid + config.prefilter_size |= 0x1; // must be odd + config.correlation_window_size |= 0x1; // must be odd + config.disparity_range = (config.disparity_range / 16) * 16; // must be multiple of 16 + + // check stereo method + // Note: With single-threaded NodeHandle, configCb and imageCb can't be called + // concurrently, so this is thread-safe. + block_matcher_.setPreFilterCap(config.prefilter_cap); + block_matcher_.setCorrelationWindowSize(config.correlation_window_size); + block_matcher_.setMinDisparity(config.min_disparity); + block_matcher_.setDisparityRange(config.disparity_range); + block_matcher_.setUniquenessRatio(config.uniqueness_ratio); + block_matcher_.setSpeckleSize(config.speckle_size); + block_matcher_.setSpeckleRange(config.speckle_range); + if (config.stereo_algorithm == stereo_image_proc::Disparity_StereoBM) { // StereoBM + block_matcher_.setStereoType(StereoProcessor::BM); + block_matcher_.setPreFilterSize(config.prefilter_size); + block_matcher_.setTextureThreshold(config.texture_threshold); + } + else if (config.stereo_algorithm == stereo_image_proc::Disparity_StereoSGBM) { // StereoSGBM + block_matcher_.setStereoType(StereoProcessor::SGBM); + block_matcher_.setSgbmMode(config.fullDP); + block_matcher_.setP1(config.P1); + block_matcher_.setP2(config.P2); + block_matcher_.setDisp12MaxDiff(config.disp12MaxDiff); + } +} + +} // namespace stereo_image_proc + +// Register nodelet +#include +PLUGINLIB_EXPORT_CLASS(stereo_image_proc::DisparityNodelet,nodelet::Nodelet) diff --git a/future pkg/image_pipeline/stereo_image_proc/src/nodelets/point_cloud2.cpp b/future pkg/image_pipeline/stereo_image_proc/src/nodelets/point_cloud2.cpp new file mode 100644 index 0000000..fae8ba1 --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/src/nodelets/point_cloud2.cpp @@ -0,0 +1,272 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. 
+* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ +#include +#if ((BOOST_VERSION / 100) % 1000) >= 53 +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace stereo_image_proc { + +using namespace sensor_msgs; +using namespace stereo_msgs; +using namespace message_filters::sync_policies; + +class PointCloud2Nodelet : public nodelet::Nodelet +{ + boost::shared_ptr it_; + + // Subscriptions + image_transport::SubscriberFilter sub_l_image_; + message_filters::Subscriber sub_l_info_, sub_r_info_; + message_filters::Subscriber sub_disparity_; + typedef ExactTime ExactPolicy; + typedef ApproximateTime ApproximatePolicy; + typedef message_filters::Synchronizer ExactSync; + typedef message_filters::Synchronizer ApproximateSync; + boost::shared_ptr exact_sync_; + boost::shared_ptr approximate_sync_; + + // Publications + boost::mutex connect_mutex_; + ros::Publisher pub_points2_; + + // Processing state (note: only safe because we're single-threaded!) + image_geometry::StereoCameraModel model_; + cv::Mat_ points_mat_; // scratch buffer + + virtual void onInit(); + + void connectCb(); + + void imageCb(const ImageConstPtr& l_image_msg, + const CameraInfoConstPtr& l_info_msg, + const CameraInfoConstPtr& r_info_msg, + const DisparityImageConstPtr& disp_msg); +}; + +void PointCloud2Nodelet::onInit() +{ + ros::NodeHandle &nh = getNodeHandle(); + ros::NodeHandle &private_nh = getPrivateNodeHandle(); + it_.reset(new image_transport::ImageTransport(nh)); + + // Synchronize inputs. Topic subscriptions happen on demand in the connection + // callback. Optionally do approximate synchronization. 
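+  // The synchronized set here is the rectified color image, both camera_info
+  // messages (the stereo model needs both to recover the baseline used for
+  // reprojection), and the disparity image produced by the disparity nodelet.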
+ int queue_size; + private_nh.param("queue_size", queue_size, 5); + bool approx; + private_nh.param("approximate_sync", approx, false); + if (approx) + { + approximate_sync_.reset( new ApproximateSync(ApproximatePolicy(queue_size), + sub_l_image_, sub_l_info_, + sub_r_info_, sub_disparity_) ); + approximate_sync_->registerCallback(boost::bind(&PointCloud2Nodelet::imageCb, + this, _1, _2, _3, _4)); + } + else + { + exact_sync_.reset( new ExactSync(ExactPolicy(queue_size), + sub_l_image_, sub_l_info_, + sub_r_info_, sub_disparity_) ); + exact_sync_->registerCallback(boost::bind(&PointCloud2Nodelet::imageCb, + this, _1, _2, _3, _4)); + } + + // Monitor whether anyone is subscribed to the output + ros::SubscriberStatusCallback connect_cb = boost::bind(&PointCloud2Nodelet::connectCb, this); + // Make sure we don't enter connectCb() between advertising and assigning to pub_points2_ + boost::lock_guard lock(connect_mutex_); + pub_points2_ = nh.advertise("points2", 1, connect_cb, connect_cb); +} + +// Handles (un)subscribing when clients (un)subscribe +void PointCloud2Nodelet::connectCb() +{ + boost::lock_guard lock(connect_mutex_); + if (pub_points2_.getNumSubscribers() == 0) + { + sub_l_image_ .unsubscribe(); + sub_l_info_ .unsubscribe(); + sub_r_info_ .unsubscribe(); + sub_disparity_.unsubscribe(); + } + else if (!sub_l_image_.getSubscriber()) + { + ros::NodeHandle &nh = getNodeHandle(); + // Queue size 1 should be OK; the one that matters is the synchronizer queue size. + image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle()); + sub_l_image_ .subscribe(*it_, "left/image_rect_color", 1, hints); + sub_l_info_ .subscribe(nh, "left/camera_info", 1); + sub_r_info_ .subscribe(nh, "right/camera_info", 1); + sub_disparity_.subscribe(nh, "disparity", 1); + } +} + +inline bool isValidPoint(const cv::Vec3f& pt) +{ + // Check both for disparities explicitly marked as invalid (where OpenCV maps pt.z to MISSING_Z) + // and zero disparities (point mapped to infinity). 
+ return pt[2] != image_geometry::StereoCameraModel::MISSING_Z && !std::isinf(pt[2]); +} + +void PointCloud2Nodelet::imageCb(const ImageConstPtr& l_image_msg, + const CameraInfoConstPtr& l_info_msg, + const CameraInfoConstPtr& r_info_msg, + const DisparityImageConstPtr& disp_msg) +{ + // Update the camera model + model_.fromCameraInfo(l_info_msg, r_info_msg); + + // Calculate point cloud + const Image& dimage = disp_msg->image; + const cv::Mat_ dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step); + model_.projectDisparityImageTo3d(dmat, points_mat_, true); + cv::Mat_ mat = points_mat_; + + // Fill in new PointCloud2 message (2D image-like layout) + PointCloud2Ptr points_msg = boost::make_shared(); + points_msg->header = disp_msg->header; + points_msg->height = mat.rows; + points_msg->width = mat.cols; + points_msg->is_bigendian = false; + points_msg->is_dense = false; // there may be invalid points + + sensor_msgs::PointCloud2Modifier pcd_modifier(*points_msg); + pcd_modifier.setPointCloud2FieldsByString(2, "xyz", "rgb"); + + sensor_msgs::PointCloud2Iterator iter_x(*points_msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(*points_msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(*points_msg, "z"); + sensor_msgs::PointCloud2Iterator iter_r(*points_msg, "r"); + sensor_msgs::PointCloud2Iterator iter_g(*points_msg, "g"); + sensor_msgs::PointCloud2Iterator iter_b(*points_msg, "b"); + + float bad_point = std::numeric_limits::quiet_NaN (); + for (int v = 0; v < mat.rows; ++v) + { + for (int u = 0; u < mat.cols; ++u, ++iter_x, ++iter_y, ++iter_z) + { + if (isValidPoint(mat(v,u))) + { + // x,y,z + *iter_x = mat(v, u)[0]; + *iter_y = mat(v, u)[1]; + *iter_z = mat(v, u)[2]; + } + else + { + *iter_x = *iter_y = *iter_z = bad_point; + } + } + } + + // Fill in color + namespace enc = sensor_msgs::image_encodings; + const std::string& encoding = l_image_msg->encoding; + if (encoding == enc::MONO8) + { + const cv::Mat_ color(l_image_msg->height, l_image_msg->width, + (uint8_t*)&l_image_msg->data[0], + l_image_msg->step); + for (int v = 0; v < mat.rows; ++v) + { + for (int u = 0; u < mat.cols; ++u, ++iter_r, ++iter_g, ++iter_b) + { + uint8_t g = color(v,u); + *iter_r = *iter_g = *iter_b = g; + } + } + } + else if (encoding == enc::RGB8) + { + const cv::Mat_ color(l_image_msg->height, l_image_msg->width, + (cv::Vec3b*)&l_image_msg->data[0], + l_image_msg->step); + for (int v = 0; v < mat.rows; ++v) + { + for (int u = 0; u < mat.cols; ++u, ++iter_r, ++iter_g, ++iter_b) + { + const cv::Vec3b& rgb = color(v,u); + *iter_r = rgb[0]; + *iter_g = rgb[1]; + *iter_b = rgb[2]; + } + } + } + else if (encoding == enc::BGR8) + { + const cv::Mat_ color(l_image_msg->height, l_image_msg->width, + (cv::Vec3b*)&l_image_msg->data[0], + l_image_msg->step); + for (int v = 0; v < mat.rows; ++v) + { + for (int u = 0; u < mat.cols; ++u, ++iter_r, ++iter_g, ++iter_b) + { + const cv::Vec3b& bgr = color(v,u); + *iter_r = bgr[2]; + *iter_g = bgr[1]; + *iter_b = bgr[0]; + } + } + } + else + { + NODELET_WARN_THROTTLE(30, "Could not fill color channel of the point cloud, " + "unsupported encoding '%s'", encoding.c_str()); + } + + pub_points2_.publish(points_msg); +} + +} // namespace stereo_image_proc + +// Register nodelet +#include +PLUGINLIB_EXPORT_CLASS(stereo_image_proc::PointCloud2Nodelet,nodelet::Nodelet) diff --git a/future pkg/image_pipeline/stereo_image_proc/src/nodes/stereo_image_proc.cpp b/future pkg/image_pipeline/stereo_image_proc/src/nodes/stereo_image_proc.cpp new file mode 100644 index 
0000000..df83302 --- /dev/null +++ b/future pkg/image_pipeline/stereo_image_proc/src/nodes/stereo_image_proc.cpp @@ -0,0 +1,149 @@ +/********************************************************************* +* Software License Agreement (BSD License) +* +* Copyright (c) 2008, Willow Garage, Inc. +* All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of the Willow Garage nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +* POSSIBILITY OF SUCH DAMAGE. +*********************************************************************/ + +#include <ros/ros.h> +#include <nodelet/loader.h> +#include <image_proc/advertisement_checker.h> + +void loadMonocularNodelets(nodelet::Loader& manager, const std::string& side, + const XmlRpc::XmlRpcValue& rectify_params, + const nodelet::V_string& my_argv) +{ + nodelet::M_string remappings; + + // Explicitly resolve global remappings (wg-ros-pkg #5055). + // Otherwise the internal remapping 'image_raw' -> 'left/image_raw' can hide a + // global remapping from the launch file or command line.
+ std::string image_raw_topic = ros::names::resolve(side + "/image_raw"); + std::string image_mono_topic = ros::names::resolve(side + "/image_mono"); + std::string image_color_topic = ros::names::resolve(side + "/image_color"); + std::string image_rect_topic = ros::names::resolve(side + "/image_rect"); + std::string image_rect_color_topic = ros::names::resolve(side + "/image_rect_color"); + std::string camera_info_topic = ros::names::resolve(side + "/camera_info"); + + // Debayer nodelet: image_raw -> image_mono, image_color + remappings["image_raw"] = image_raw_topic; + remappings["image_mono"] = image_mono_topic; + remappings["image_color"] = image_color_topic; + std::string debayer_name = ros::this_node::getName() + "_debayer_" + side; + manager.load(debayer_name, "image_proc/debayer", remappings, my_argv); + + // Rectify nodelet: image_mono -> image_rect + remappings.clear(); + remappings["image_mono"] = image_mono_topic; + remappings["camera_info"] = camera_info_topic; + remappings["image_rect"] = image_rect_topic; + std::string rectify_mono_name = ros::this_node::getName() + "_rectify_mono_" + side; + if (rectify_params.valid()) + ros::param::set(rectify_mono_name, rectify_params); + manager.load(rectify_mono_name, "image_proc/rectify", remappings, my_argv); + + // Rectify nodelet: image_color -> image_rect_color + remappings.clear(); + remappings["image_mono"] = image_color_topic; + remappings["camera_info"] = camera_info_topic; + remappings["image_rect"] = image_rect_color_topic; + std::string rectify_color_name = ros::this_node::getName() + "_rectify_color_" + side; + if (rectify_params.valid()) + ros::param::set(rectify_color_name, rectify_params); + manager.load(rectify_color_name, "image_proc/rectify", remappings, my_argv); +} + +int main(int argc, char **argv) +{ + ros::init(argc, argv, "stereo_image_proc"); + + // Check for common user errors + if (ros::names::remap("camera") != "camera") + { + ROS_WARN("Remapping 'camera' has no effect! Start stereo_image_proc in the " + "stereo namespace instead.\nExample command-line usage:\n" + "\t$ ROS_NAMESPACE=%s rosrun stereo_image_proc stereo_image_proc", + ros::names::remap("camera").c_str()); + } + if (ros::this_node::getNamespace() == "/") + { + ROS_WARN("Started in the global namespace! This is probably wrong. 
Start " + "stereo_image_proc in the stereo namespace.\nExample command-line usage:\n" + "\t$ ROS_NAMESPACE=my_stereo rosrun stereo_image_proc stereo_image_proc"); + } + + // Shared parameters to be propagated to nodelet private namespaces + ros::NodeHandle private_nh("~"); + XmlRpc::XmlRpcValue shared_params; + int queue_size; + if (private_nh.getParam("queue_size", queue_size)) + shared_params["queue_size"] = queue_size; + + nodelet::Loader manager(false); // Don't bring up the manager ROS API + nodelet::M_string remappings; + nodelet::V_string my_argv; + + // Load equivalents of image_proc for left and right cameras + loadMonocularNodelets(manager, "left", shared_params, my_argv); + loadMonocularNodelets(manager, "right", shared_params, my_argv); + + // Stereo nodelets also need to know the synchronization policy + bool approx_sync; + if (private_nh.getParam("approximate_sync", approx_sync)) + shared_params["approximate_sync"] = XmlRpc::XmlRpcValue(approx_sync); + + // Disparity nodelet + // Inputs: left/image_rect, left/camera_info, right/image_rect, right/camera_info + // Outputs: disparity + // NOTE: Using node name for the disparity nodelet because it is the only one using + // dynamic_reconfigure so far, and this makes us backwards-compatible with cturtle. + std::string disparity_name = ros::this_node::getName(); + manager.load(disparity_name, "stereo_image_proc/disparity", remappings, my_argv); + + // PointCloud2 nodelet + // Inputs: left/image_rect_color, left/camera_info, right/camera_info, disparity + // Outputs: points2 + std::string point_cloud2_name = ros::this_node::getName() + "_point_cloud2"; + if (shared_params.valid()) + ros::param::set(point_cloud2_name, shared_params); + manager.load(point_cloud2_name, "stereo_image_proc/point_cloud2", remappings, my_argv); + + // Check for only the original camera topics + ros::V_string topics; + topics.push_back(ros::names::resolve("left/image_raw")); + topics.push_back(ros::names::resolve("left/camera_info")); + topics.push_back(ros::names::resolve("right/image_raw")); + topics.push_back(ros::names::resolve("right/camera_info")); + image_proc::AdvertisementChecker check_inputs(ros::NodeHandle(), ros::this_node::getName()); + check_inputs.start(topics, 60.0); + + ros::spin(); + return 0; +} diff --git a/install b/install index 8a9bc0d..3ccee9e 100755 --- a/install +++ b/install @@ -40,137 +40,137 @@ do_install() fi brew update - # XQuartz - if ! hash xquartz 2>/dev/null; then - brew install caskroom/cask/brew-cask - brew cask install xquartz - echo "Log out and in to finalize XQuartz setup." - exit 0 - fi - - # Check for pip packages in the system Python directory. - if [ $(ls /Library/Python/2.7/site-packages/ | wc -l) -gt 1 ]; then - echo "These instructions are about to install Python from Homebrew. However, there are already" - echo "pip packages installed against the system python, in the following path:" - echo - echo " /Library/Python/2.7/site-packages/" - echo - echo "If you have problems, please uninstall these packages:" - echo - echo " for i in \$( pip freeze ); do sudo -H pip uninstall -y \$i; done" - echo - echo "Then delete the build directory and start over again from scratch." - fi - - # Check for root-owned stuff in /usr/local - if [ $(find /usr/local/* -maxdepth 3 -type d -user root | wc -l) -gt 1 ]; then - echo "Looks like you have some stuff owned by the root user in /usr/local. 
The installation can" - echo "continue, but if there are pip packages which were installed using sudo, this will be a" - echo "problem if rosdep tries to update them. If you have issues installing dependencies, consider" - echo "nuking your Homebrew installation and starting from scratch:" - echo - echo " https://gist.github.com/mxcl/1173223" - echo - echo "Alternatively, you could try chowning the contents of /usr/local to yourself:" - echo - echo " sudo chown -R $USER:admin /usr/local/*" - fi - - # Brewed Python - if [ ! "$(which python2)" = "/usr/local/bin/python2" ]; then - brew install python - mkdir -p ~/Library/Python/2.7/lib/python/site-packages - echo "$(brew --prefix)/lib/python2.7/site-packages" >> ~/Library/Python/2.7/lib/python/site-packages/homebrew.pth - fi - - # Gives us console_bridge, urdfdom, and gtest. - brew tap ros/deps - - # This tap gives us formulae for Gazebo and its dependencies, including SDF. - brew tap osrf/simulation - - # ROS infrastructure tools - brew install libyaml || true - pip2 install -U setuptools rosdep rosinstall_generator wstool rosinstall catkin_tools bloom empy sphinx pycurl - - # Rosdep has an issue detecting that qt5 is correctly installed, so preinstall it. This is a keg-only formula, - # so add its location to the prefix path in order for workspace packages to be able to find it. - brew install qt5 pyqt5 sip - export CMAKE_PREFIX_PATH=$(brew --prefix qt5) - - # This hack is required to make qt_gui_cpp compile correctly. See https://github.com/mikepurvis/ros-install-osx/pull/84#issuecomment-338209466 - pushd /usr/local/share/sip - if [ ! -e PyQt5 ]; then - ln -s Qt5 PyQt5 - fi - popd - - # Get homebrew's opencv3 from a bottle so that we don't have to build it. - brew install opencv3 - - # Mock out python and pip so that we get the brewed versions when rosdep and other programs call them. - # Mock out pip so that we get the brewed pip2 when rosdep calls it to make an installation. - export PATH=$(pwd)/shim:$PATH + # # XQuartz + # if ! hash xquartz 2>/dev/null; then + # brew install caskroom/cask/brew-cask + # brew cask install xquartz + # echo "Log out and in to finalize XQuartz setup." + # exit 0 + # fi + + # # Check for pip packages in the system Python directory. + # if [ $(ls /Library/Python/2.7/site-packages/ | wc -l) -gt 1 ]; then + # echo "These instructions are about to install Python from Homebrew. However, there are already" + # echo "pip packages installed against the system python, in the following path:" + # echo + # echo " /Library/Python/2.7/site-packages/" + # echo + # echo "If you have problems, please uninstall these packages:" + # echo + # echo " for i in \$( pip freeze ); do sudo -H pip uninstall -y \$i; done" + # echo + # echo "Then delete the build directory and start over again from scratch." + # fi + + # # Check for root-owned stuff in /usr/local + # if [ $(find /usr/local/* -maxdepth 3 -type d -user root | wc -l) -gt 1 ]; then + # echo "Looks like you have some stuff owned by the root user in /usr/local. The installation can" + # echo "continue, but if there are pip packages which were installed using sudo, this will be a" + # echo "problem if rosdep tries to update them. 
If you have issues installing dependencies, consider" + # echo "nuking your Homebrew installation and starting from scratch:" + # echo + # echo " https://gist.github.com/mxcl/1173223" + # echo + # echo "Alternatively, you could try chowning the contents of /usr/local to yourself:" + # echo + # echo " sudo chown -R $USER:admin /usr/local/*" + # fi + + # # Brewed Python + # if [ ! "$(which python2)" = "/usr/local/bin/python2" ]; then + # brew install python + # mkdir -p ~/Library/Python/2.7/lib/python/site-packages + # echo "$(brew --prefix)/lib/python2.7/site-packages" >> ~/Library/Python/2.7/lib/python/site-packages/homebrew.pth + # fi + + # # Gives us console_bridge, urdfdom, and gtest. + # brew tap nagakiran/deps + + # # This tap gives us formulae for Gazebo and its dependencies, including SDF. + # brew tap osrf/simulation + + # # ROS infrastructure tools + # brew install libyaml || true + # sudo pip2 install -U rosdep rosinstall_generator wstool rosinstall bloom empy sphinx --ignore-installed six --user + + # # Rosdep has an issue detecting that qt5 is correctly installed, so preinstall it. This is a keg-only formula, + # # so add its location to the prefix path in order for workspace packages to be able to find it. + # brew install qt5 pyqt5 sip + # export CMAKE_PREFIX_PATH=$(brew --prefix qt5) + + # # This hack is required to make qt_gui_cpp compile correctly. See https://github.com/mikepurvis/ros-install-osx/pull/84#issuecomment-338209466 + # pushd /usr/local/share/sip + # if [ ! -e PyQt5 ]; then + # ln -s Qt5 PyQt5 + # fi + # popd + + # # Get homebrew's opencv3 from a bottle so that we don't have to build it. + # brew install opencv3 + + # # Mock out python and pip so that we get the brewed versions when rosdep and other programs call them. + # # Mock out pip so that we get the brewed pip2 when rosdep calls it to make an installation. + # export PATH=$(pwd)/shim:$PATH # Initialize and update rosdep - if [ ! -d /etc/ros/rosdep/ ]; then - echo "This sudo prompt is to initialize rosdep (creates the /etc/ros/rosdep path)." - sudo rosdep init - fi - if [ ! -f /etc/ros/rosdep/sources.list.d/10-ros-install-osx.list ]; then - echo "This sudo prompt adds the the brewed python rosdep yaml to /etc/ros/rosdep/sources.list.d/10-ros-install-osx.list" - sudo sh -c "echo 'yaml file://$(pwd)/rosdeps.yaml osx' > /etc/ros/rosdep/sources.list.d/10-ros-install-osx.list" - fi - rosdep update - - # Remove previous workspace if present, create and enter new one. + # if [ ! -d /etc/ros/rosdep/ ]; then + # echo "This sudo prompt is to initialize rosdep (creates the /etc/ros/rosdep path)." + # sudo rosdep init + # fi + # if [ ! -f /etc/ros/rosdep/sources.list.d/10-ros-install-osx.list ]; then + # echo "This sudo prompt adds the the brewed python rosdep yaml to /etc/ros/rosdep/sources.list.d/10-ros-install-osx.list" + # sudo sh -c "echo 'yaml file://$(pwd)/rosdeps.yaml osx' > /etc/ros/rosdep/sources.list.d/10-ros-install-osx.list" + # fi + # rosdep update + + # # Remove previous workspace if present, create and enter new one. WS=${ROS_DISTRO}_${ROS_CONFIGURATION}_ws - if [ -d "$WS" ]; then - rm -rf "$WS" - fi - mkdir $WS + # if [ -d "$WS" ]; then + # rm -rf "$WS" + # fi + # mkdir $WS cd $WS - # Standard source install - rosinstall_generator ${ROS_CONFIGURATION} ${ROS_EXTRA_PACKAGES} --rosdistro ${ROS_DISTRO} --deps --tar > ${WS}.rosinstall - wstool init src - pushd src - # Avoid downloading opencv3; we already installed it from homebrew. 
- wstool merge file://$(pwd)/../${WS}.rosinstall - wstool remove opencv3 - wstool update -j8 - - # See: https://github.com/ros/catkin/pull/784 - if [ -d catkin ]; then - curl https://raw.githubusercontent.com/ros/catkin/8a47f4eceb4954beb4a5b38b50793d0bbe2c96cf/cmake/catkinConfig.cmake.in > catkin/cmake/catkinConfig.cmake.in - fi - - # Pending release: https://github.com/ros-visualization/rviz/pull/1165 - if [ -d rviz ]; then - curl https://raw.githubusercontent.com/mikepurvis/rviz/21eac2bcc061bb623fd17aa449f6215c493b27f2/CMakeLists.txt > rviz/CMakeLists.txt - fi - - if [ -d image_pipeline ]; then - # Pending merge and release: https://github.com/ros-perception/image_pipeline/pull/303 - curl https://raw.githubusercontent.com/mikepurvis/image_pipeline/d308d00aed5b66961f9e13ab7a50660a24be7c7f/image_proc/CMakeLists.txt > image_pipeline/image_proc/CMakeLists.txt - curl https://raw.githubusercontent.com/mikepurvis/image_pipeline/d308d00aed5b66961f9e13ab7a50660a24be7c7f/stereo_image_proc/CMakeLists.txt > image_pipeline/stereo_image_proc/CMakeLists.txt - - # Pending merge and release: https://github.com/ros-perception/image_pipeline/pull/304 - curl https://raw.githubusercontent.com/mbreyer/image_pipeline/4bf67d3c861cb157e8174f67250680263b18d2b7/image_publisher/CMakeLists.txt > image_pipeline/image_publisher/CMakeLists.txt - fi - - # Package dependencies. - rosdep install --from-paths . --ignore-src --rosdistro ${ROS_DISTRO} -y --as-root pip:no - popd - - # Clean out or create the install directory. - if [ -d ${ROS_INSTALL_DIR} ]; then - rm -rf ${ROS_INSTALL_DIR}/* - else - echo "This sudo prompt is to create and chown ${ROS_INSTALL_DIR}." - sudo mkdir -p ${ROS_INSTALL_DIR} - sudo chown $USER ${ROS_INSTALL_DIR} - fi + # # Standard source install + # rosinstall_generator ${ROS_CONFIGURATION} ${ROS_EXTRA_PACKAGES} --rosdistro ${ROS_DISTRO} --deps --tar > ${WS}.rosinstall + # wstool init src + # pushd src + # # Avoid downloading opencv3; we already installed it from homebrew. + # wstool merge file://$(pwd)/../${WS}.rosinstall + # wstool remove opencv3 + # wstool update -j8 + + # # See: https://github.com/ros/catkin/pull/784 + # if [ -d catkin ]; then + # curl https://raw.githubusercontent.com/ros/catkin/8a47f4eceb4954beb4a5b38b50793d0bbe2c96cf/cmake/catkinConfig.cmake.in > catkin/cmake/catkinConfig.cmake.in + # fi + + # # Pending release: https://github.com/ros-visualization/rviz/pull/1165 + # if [ -d rviz ]; then + # curl https://raw.githubusercontent.com/mikepurvis/rviz/21eac2bcc061bb623fd17aa449f6215c493b27f2/CMakeLists.txt > rviz/CMakeLists.txt + # fi + + # if [ -d image_pipeline ]; then + # # Pending merge and release: https://github.com/ros-perception/image_pipeline/pull/303 + # curl https://raw.githubusercontent.com/mikepurvis/image_pipeline/d308d00aed5b66961f9e13ab7a50660a24be7c7f/image_proc/CMakeLists.txt > image_pipeline/image_proc/CMakeLists.txt + # curl https://raw.githubusercontent.com/mikepurvis/image_pipeline/d308d00aed5b66961f9e13ab7a50660a24be7c7f/stereo_image_proc/CMakeLists.txt > image_pipeline/stereo_image_proc/CMakeLists.txt + + # # Pending merge and release: https://github.com/ros-perception/image_pipeline/pull/304 + # curl https://raw.githubusercontent.com/mbreyer/image_pipeline/4bf67d3c861cb157e8174f67250680263b18d2b7/image_publisher/CMakeLists.txt > image_pipeline/image_publisher/CMakeLists.txt + # fi + + # # Package dependencies. + # rosdep install --from-paths --skip-keys gtest --skip-keys google-mock . 
--ignore-src --rosdistro ${ROS_DISTRO} -y --as-root pip:no + # popd + + # # Clean out or create the install directory. + # if [ -d ${ROS_INSTALL_DIR} ]; then + # rm -rf ${ROS_INSTALL_DIR}/* + # else + # echo "This sudo prompt is to create and chown ${ROS_INSTALL_DIR}." + # sudo mkdir -p ${ROS_INSTALL_DIR} + # sudo chown $USER ${ROS_INSTALL_DIR} + # fi # Parallel build. catkin config --install \ @@ -178,7 +178,9 @@ do_install() --cmake-args \ -DCATKIN_ENABLE_TESTING=1 \ -DCMAKE_BUILD_TYPE=Release \ + -DCATKIN_ENABLE_TESTING=0 \ -DCMAKE_FIND_FRAMEWORK=LAST \ + -DCMAKE_CXX_FLAGS="-std=c++11" \ -DPYTHON_EXECUTABLE=$(which python2) \ -DPYTHON_LIBRARY=$(python2 -c "import sys; print sys.prefix")/lib/libpython2.7.dylib \ -DPYTHON_INCLUDE_DIR=$(python2 -c "import sys; print sys.prefix")/include/python2.7 @@ -197,7 +199,7 @@ do_install() echo "You have System Integrity Protection enabled." echo echo "This prevents DYLD_LIBRARY_PATH from being exported to subshells" - echo "Please add: export DYLD_LIBRARY_PATH=\$DYLD_LIBRARY_PATH:/opt/ros/\$ROS_DISTRO/lib" + echo "Please add: export DYLD_LIBRARY_PATH=\${DYLD_LIBRARY_PATH}:/opt/ros/\${ROS_DISTRO}/lib" echo "To the start of /opt/ros/$ROS_DISTRO/bin/rosrun to work around the issue." fi fi
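
Note: as a quick smoke test of the patched pipeline, something like the following could be run once the build finishes. This is a sketch only: it assumes the script is invoked as ./install from the repository root, that the workspace ends up under /opt/ros/${ROS_DISTRO} as the SIP warning above implies, and that a stereo camera driver is already publishing the left/right image_raw and camera_info topics checked at the end of stereo_image_proc.cpp.

    # Build and install the workspace (the commented-out bootstrap steps are assumed to have been run once already)
    ./install

    # Source the freshly installed environment (install path is an assumption)
    source /opt/ros/${ROS_DISTRO}/setup.bash

    # Run the stereo pipeline inside a stereo namespace, as the warnings in
    # stereo_image_proc.cpp recommend; queue_size and approximate_sync are the
    # private parameters propagated to the disparity and point_cloud2 nodelets.
    ROS_NAMESPACE=my_stereo rosrun stereo_image_proc stereo_image_proc _approximate_sync:=true _queue_size:=5

    # Confirm that the point_cloud2 nodelet is publishing
    rostopic hz /my_stereo/points2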