diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 173363ec17..677761b0fa 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -7,19 +7,19 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build: runs-on: ubuntu-latest steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: lscpu diff --git a/.github/workflows/ios.yml b/.github/workflows/ios.yml index c8e0432fd3..eb3c8f95c6 100644 --- a/.github/workflows/ios.yml +++ b/.github/workflows/ios.yml @@ -7,6 +7,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ios: runs-on: ${{ matrix.os }} @@ -16,14 +22,8 @@ jobs: os: [macos-11] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: | diff --git a/.github/workflows/macos-ustk.yml b/.github/workflows/macos-ustk.yml index 120d6211e2..266e8c3baf 100644 --- a/.github/workflows/macos-ustk.yml +++ b/.github/workflows/macos-ustk.yml @@ -7,6 +7,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-macos: runs-on: ${{ matrix.os }} @@ -16,14 +22,8 @@ jobs: os: [macos-12] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: | diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 4427219ff7..7bd441dbbe 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -7,6 +7,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: 
true + jobs: build-macos: runs-on: ${{ matrix.os }} @@ -16,14 +22,8 @@ jobs: os: [macos-12, macos-13] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: | diff --git a/.github/workflows/other-arch-isolated.yml b/.github/workflows/other-arch-isolated.yml index 71d9e8ed55..868bfebb3d 100644 --- a/.github/workflows/other-arch-isolated.yml +++ b/.github/workflows/other-arch-isolated.yml @@ -8,10 +8,16 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-other-architectures: # The host should always be linux - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 name: Build on ${{ matrix.distro }} ${{ matrix.arch }} ${{ matrix.endianness }} # Run steps on a matrix of different arch/distro combinations @@ -40,14 +46,8 @@ jobs: endianness: (Big Endian) steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run on arch uses: uraimo/run-on-arch-action@v2.2.1 @@ -65,7 +65,8 @@ jobs: pwd mkdir build && cd build - cmake .. -DBUILD_DEMOS=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TUTORIALS=OFF -DBUILD_JAVA=OFF -DBUILD_MODULE_visp_java=OFF -DBUILD_MODULE_visp_java_binding=OFF -DUSE_CXX_STANDARD=17 + cmake .. 
-DBUILD_DEMOS=OFF -DBUILD_EXAMPLES=OFF -DBUILD_TUTORIALS=OFF -DBUILD_JAVA=OFF \ + -DUSE_JPEG=OFF -DUSE_PNG=OFF -DUSE_X11=OFF -DUSE_XML2=OFF -DBUILD_JAVA=OFF -DUSE_BLAS/LAPACK=OFF cat ViSP-third-party.txt make -j$(nproc) diff --git a/.github/workflows/other-arch.yml b/.github/workflows/other-arch.yml index 2818fda11a..902a514e86 100644 --- a/.github/workflows/other-arch.yml +++ b/.github/workflows/other-arch.yml @@ -8,6 +8,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-other-architectures: # The host should always be linux @@ -38,14 +44,8 @@ jobs: endianness: (Big Endian) steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run on arch uses: uraimo/run-on-arch-action@v2.1.1 diff --git a/.github/workflows/ubuntu-3rdparty.yml b/.github/workflows/ubuntu-3rdparty.yml index c500260066..a0950d6946 100644 --- a/.github/workflows/ubuntu-3rdparty.yml +++ b/.github/workflows/ubuntu-3rdparty.yml @@ -7,6 +7,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ubuntu-dep-apt: runs-on: ${{ matrix.os }} @@ -17,14 +23,8 @@ jobs: compiler: [ {CC: /usr/bin/gcc-10, CXX: /usr/bin/g++-10}, {CC: /usr/bin/clang, CXX: /usr/bin/clang++} ] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: lscpu diff --git a/.github/workflows/ubuntu-contrib.yml b/.github/workflows/ubuntu-contrib.yml index 7a417e34d1..6ff03f0316 100644 --- a/.github/workflows/ubuntu-contrib.yml +++ b/.github/workflows/ubuntu-contrib.yml @@ -7,6 +7,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ubuntu-dep-apt: runs-on: ${{ matrix.os }} @@ -17,14 +23,8 @@ jobs: compiler: [ {CC: /usr/bin/gcc-10, CXX: /usr/bin/g++-10}, {CC: /usr/bin/clang, CXX: /usr/bin/clang++} ] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: 
lscpu @@ -35,7 +35,7 @@ jobs: - name: Print compiler information run: dpkg --list | grep compiler - - name: Install dependencies for ubuntu 18.04 and 20.04 + - name: Install dependencies for ubuntu 20.04 if: matrix.os != 'ubuntu-22.04' run: sudo apt-get update && sudo apt-get install -y libx11-dev libdc1394-22-dev libv4l-dev liblapack-dev libopenblas-dev libeigen3-dev libopencv-dev nlohmann-json3-dev diff --git a/.github/workflows/ubuntu-dep-apt.yml b/.github/workflows/ubuntu-dep-apt.yml index bb77160c2c..cc206218f6 100644 --- a/.github/workflows/ubuntu-dep-apt.yml +++ b/.github/workflows/ubuntu-dep-apt.yml @@ -7,6 +7,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ubuntu-dep-apt: runs-on: ${{ matrix.os }} @@ -15,17 +21,11 @@ jobs: matrix: os: [ubuntu-20.04, ubuntu-22.04] compiler: [ {CC: /usr/bin/gcc-9, CXX: /usr/bin/g++-9}, {CC: /usr/bin/gcc-10, CXX: /usr/bin/g++-10}, {CC: /usr/bin/clang, CXX: /usr/bin/clang++} ] - standard: [ 11, 14, 17 ] + standard: [ 98, 11, 17 ] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: lscpu diff --git a/.github/workflows/ubuntu-dep-src.yml b/.github/workflows/ubuntu-dep-src.yml index af526068c8..aa382ffd69 100644 --- a/.github/workflows/ubuntu-dep-src.yml +++ b/.github/workflows/ubuntu-dep-src.yml @@ -7,6 +7,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ubuntu-dep-src: runs-on: ${{ matrix.os }} @@ -16,14 +22,8 @@ jobs: os: [ubuntu-20.04, ubuntu-22.04] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: lscpu diff --git a/.github/workflows/ubuntu-isolated.yml b/.github/workflows/ubuntu-isolated.yml index 206d8130e2..bd5684f0e5 100644 --- a/.github/workflows/ubuntu-isolated.yml +++ b/.github/workflows/ubuntu-isolated.yml @@ -8,25 +8,31 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ubuntu-dep-apt: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - os: [ubuntu-22.04] - compiler: [ {CC: /usr/bin/clang, CXX: /usr/bin/clang++} ] + os: 
[ubuntu-latest] + compiler: [ {CC: /usr/bin/gcc, CXX: /usr/bin/g++} ] standard: [ 17 ] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} + # Install OpenMP and turn off some dependencies (below) to try to have an env similar to the ROS buildbot + - name: Install OpenMP + run: | + sudo apt-get update + sudo apt-get install libomp-dev - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: lscpu @@ -47,7 +53,9 @@ jobs: echo "CC: $CC" echo "CXX: $CXX" echo "Standard: $CXX_STANDARD" - cmake .. -DCMAKE_C_COMPILER="${CC}" -DCMAKE_CXX_COMPILER="${CXX}" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/tmp/usr/local -DCMAKE_VERBOSE_MAKEFILE=ON -DUSE_CXX_STANDARD=$CXX_STANDARD + cmake .. -DCMAKE_C_COMPILER="${CC}" -DCMAKE_CXX_COMPILER="${CXX}" \ + -DCMAKE_INSTALL_PREFIX=/tmp/usr/local -DCMAKE_VERBOSE_MAKEFILE=ON -DUSE_CXX_STANDARD=$CXX_STANDARD \ + -DUSE_JPEG=OFF -DUSE_PNG=OFF -DUSE_X11=OFF -DUSE_XML2=OFF -DBUILD_JAVA=OFF -DUSE_BLAS/LAPACK=OFF cat ViSP-third-party.txt - name: Compile diff --git a/.github/workflows/ubuntu-sanitizers.yml b/.github/workflows/ubuntu-sanitizers.yml index 46bc99623a..4210c71d7f 100644 --- a/.github/workflows/ubuntu-sanitizers.yml +++ b/.github/workflows/ubuntu-sanitizers.yml @@ -9,6 +9,12 @@ on: # every Sunday at 2 am - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ubuntu-sanitizers: runs-on: ubuntu-latest @@ -18,14 +24,8 @@ jobs: flags: ["-fsanitize=address", "-fsanitize=leak", "-fsanitize=thread", "-fsanitize=undefined"] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: lscpu @@ -82,4 +82,12 @@ jobs: # SUMMARY: AddressSanitizer: odr-violation: global 'ALIGNMENT' at /home/runner/work/visp/visp/3rdparty/simdlib/Simd/SimdLib.cpp:82:18 ASAN_OPTIONS: detect_odr_violation=0 working-directory: build - run: ctest -j$(nproc) --output-on-failure + # When running ctest we got a lot of segfaults + # This seems to be a bug reported in + # - https://stackoverflow.com/questions/77894856/possible-bug-in-gcc-sanitizers + # - https://stackoverflow.com/questions/77850769/fatal-threadsanitizer-unexpected-memory-mapping-when-running-on-linux-kernels + # The workaround seems to be to set vm.mmap_rnd_bits=28 + run: | + sudo cat /proc/sys/vm/mmap_rnd_bits + sudo sysctl vm.mmap_rnd_bits=28 + ctest -j$(nproc) --output-on-failure -V diff --git a/.github/workflows/ubuntu-ustk.yml b/.github/workflows/ubuntu-ustk.yml index fab02f8d6b..6c63dd55db 100644 --- a/.github/workflows/ubuntu-ustk.yml +++ b/.github/workflows/ubuntu-ustk.yml @@ -7,6 +7,12 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# 
https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ubuntu-dep-apt: runs-on: ${{ matrix.os }} @@ -16,14 +22,8 @@ jobs: os: [ubuntu-20.04, ubuntu-22.04] steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: lscpu diff --git a/.github/workflows/valgrind.yml b/.github/workflows/valgrind.yml index 6095eb2034..3f77bb5762 100644 --- a/.github/workflows/valgrind.yml +++ b/.github/workflows/valgrind.yml @@ -7,19 +7,19 @@ on: schedule: - cron: '0 2 * * SUN' +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build-ubuntu-valgrind: runs-on: ubuntu-latest steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Print system information run: lscpu diff --git a/.github/workflows/windows-clang.yaml b/.github/workflows/windows-clang.yaml index 7868044bf9..6da3a39259 100644 --- a/.github/workflows/windows-clang.yaml +++ b/.github/workflows/windows-clang.yaml @@ -3,6 +3,12 @@ on: pull_request: push: +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build: runs-on: ${{ matrix.os }} @@ -20,14 +26,8 @@ jobs: compiler: clang-cl steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Display the workspace path working-directory: ${{ github.workspace }} @@ -51,19 +51,21 @@ jobs: cd build cmake .. -G "Visual Studio 17 2022" -T "ClangCl" -A "x64" type ViSP-third-party.txt + pwd - name: Build ViSP - working-directory: build + working-directory: ${{ github.workspace }}\build run: | + pwd cmake --build . --config Release - name: Install ViSP - working-directory: build + working-directory: ${{ github.workspace }}\build run: | cmake --build . 
--config Release --target install - name: Check installation folder - working-directory: build + working-directory: ${{ github.workspace }}\build run: | dir ${{ github.workspace }}\build\install\ dir ${{ github.workspace }}\build\install\x64\ @@ -71,7 +73,7 @@ jobs: dir ${{ github.workspace }}\build\install\x64\vc17\bin - name: Test ViSP - working-directory: build + working-directory: ${{ github.workspace }}\build run: | set VISP_INPUT_IMAGE_PATH=${{ env.VISP_INPUT_IMAGE_PATH }} echo "VISP_INPUT_IMAGE_PATH: " diff --git a/.github/workflows/windows-msvc.yaml b/.github/workflows/windows-msvc.yaml index 4a3b846ce3..6a0e8940af 100644 --- a/.github/workflows/windows-msvc.yaml +++ b/.github/workflows/windows-msvc.yaml @@ -3,6 +3,12 @@ on: pull_request: push: +# https://stackoverflow.com/questions/66335225/how-to-cancel-previous-runs-in-the-pr-when-you-push-new-commitsupdate-the-curre#comment133398800_72408109 +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: true + jobs: build: runs-on: ${{ matrix.os }} @@ -16,17 +22,11 @@ jobs: include: - name: windows-latest - os: windows-2019 + os: windows-2022 steps: - # https://github.com/marketplace/actions/cancel-workflow-action - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.9.1 - with: - access_token: ${{ github.token }} - - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Display the workspace path working-directory: ${{ github.workspace }} @@ -47,31 +47,31 @@ jobs: echo %VISP_INPUT_IMAGE_PATH% mkdir build cd build - cmake .. -G "Visual Studio 16 2019" -A "x64" + cmake .. -G "Visual Studio 17 2022" -A "x64" type ViSP-third-party.txt - name: Build ViSP - working-directory: build + working-directory: ${{ github.workspace }}\build run: | cmake --build . --config Release - name: Install ViSP - working-directory: build + working-directory: ${{ github.workspace }}\build run: | cmake --build . 
--config Release --target install - name: Check installation folder - working-directory: build + working-directory: ${{ github.workspace }}\build run: | - dir ${{ github.workspace }}\build\install\x64\vc16\bin + dir ${{ github.workspace }}\build\install\x64\vc17\bin - name: Test ViSP - working-directory: build + working-directory: ${{ github.workspace }}\build run: | set VISP_INPUT_IMAGE_PATH=${{ env.VISP_INPUT_IMAGE_PATH }} echo "VISP_INPUT_IMAGE_PATH: " echo %VISP_INPUT_IMAGE_PATH% - set PATH=%PATH%;${{ github.workspace }}\build\install\x64\vc16\bin + set PATH=%PATH%;${{ github.workspace }}\build\install\x64\vc17\bin echo "PATH: " echo %PATH% ctest --output-on-failure -C Release -V diff --git a/.vscode/settings.json b/.vscode/settings.json index 8536ebc9b7..6d0919271e 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -117,7 +117,28 @@ "stop_token": "cpp", "view": "cpp", "mixinvector": "cpp", - "charconv": "cpp" + "charconv": "cpp", + "coroutine": "cpp", + "resumable": "cpp", + "format": "cpp", + "ranges": "cpp", + "span": "cpp", + "xfacet": "cpp", + "xhash": "cpp", + "xiosbase": "cpp", + "xlocale": "cpp", + "xlocbuf": "cpp", + "xlocinfo": "cpp", + "xlocmes": "cpp", + "xlocmon": "cpp", + "xlocnum": "cpp", + "xloctime": "cpp", + "xmemory": "cpp", + "xstring": "cpp", + "xtr1common": "cpp", + "xtree": "cpp", + "xutility": "cpp", + "execution": "cpp" }, "C_Cpp.vcFormat.indent.namespaceContents": false, "editor.formatOnSave": true, diff --git a/3rdparty/simdlib/Simd/SimdAlignment.h b/3rdparty/simdlib/Simd/SimdAlignment.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAllocator.hpp b/3rdparty/simdlib/Simd/SimdAllocator.hpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdArray.h b/3rdparty/simdlib/Simd/SimdArray.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx1.h b/3rdparty/simdlib/Simd/SimdAvx1.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx1Cpu.cpp b/3rdparty/simdlib/Simd/SimdAvx1Cpu.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx1Resizer.cpp b/3rdparty/simdlib/Simd/SimdAvx1Resizer.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2.h b/3rdparty/simdlib/Simd/SimdAvx2.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2BgrToBgra.cpp b/3rdparty/simdlib/Simd/SimdAvx2BgrToBgra.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2BgrToGray.cpp b/3rdparty/simdlib/Simd/SimdAvx2BgrToGray.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2BgrToRgb.cpp b/3rdparty/simdlib/Simd/SimdAvx2BgrToRgb.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2BgraToBgr.cpp b/3rdparty/simdlib/Simd/SimdAvx2BgraToBgr.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2BgraToGray.cpp b/3rdparty/simdlib/Simd/SimdAvx2BgraToGray.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2Cpu.cpp b/3rdparty/simdlib/Simd/SimdAvx2Cpu.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2Deinterleave.cpp b/3rdparty/simdlib/Simd/SimdAvx2Deinterleave.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2GaussianBlur.cpp b/3rdparty/simdlib/Simd/SimdAvx2GaussianBlur.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2Reduce.cpp b/3rdparty/simdlib/Simd/SimdAvx2Reduce.cpp old mode 100755 new mode 100644 diff --git 
a/3rdparty/simdlib/Simd/SimdAvx2ReduceGray2x2.cpp b/3rdparty/simdlib/Simd/SimdAvx2ReduceGray2x2.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2ReduceGray3x3.cpp b/3rdparty/simdlib/Simd/SimdAvx2ReduceGray3x3.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2ReduceGray4x4.cpp b/3rdparty/simdlib/Simd/SimdAvx2ReduceGray4x4.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2ReduceGray5x5.cpp b/3rdparty/simdlib/Simd/SimdAvx2ReduceGray5x5.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2ResizeBilinear.cpp b/3rdparty/simdlib/Simd/SimdAvx2ResizeBilinear.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdAvx2Resizer.cpp b/3rdparty/simdlib/Simd/SimdAvx2Resizer.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBase.h b/3rdparty/simdlib/Simd/SimdBase.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseBgrToBgra.cpp b/3rdparty/simdlib/Simd/SimdBaseBgrToBgra.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseBgrToGray.cpp b/3rdparty/simdlib/Simd/SimdBaseBgrToGray.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseBgrToRgb.cpp b/3rdparty/simdlib/Simd/SimdBaseBgrToRgb.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseBgraToBgr.cpp b/3rdparty/simdlib/Simd/SimdBaseBgraToBgr.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseBgraToGray.cpp b/3rdparty/simdlib/Simd/SimdBaseBgraToGray.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseCpu.cpp b/3rdparty/simdlib/Simd/SimdBaseCpu.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseDeinterleave.cpp b/3rdparty/simdlib/Simd/SimdBaseDeinterleave.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseGaussianBlur.cpp b/3rdparty/simdlib/Simd/SimdBaseGaussianBlur.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdBaseResizer.cpp b/3rdparty/simdlib/Simd/SimdBaseResizer.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdConfig.h b/3rdparty/simdlib/Simd/SimdConfig.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdConst.h b/3rdparty/simdlib/Simd/SimdConst.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdConversion.h b/3rdparty/simdlib/Simd/SimdConversion.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdCopyPixel.h b/3rdparty/simdlib/Simd/SimdCopyPixel.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdCpu.h b/3rdparty/simdlib/Simd/SimdCpu.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdDefs.h b/3rdparty/simdlib/Simd/SimdDefs.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdEnable.h b/3rdparty/simdlib/Simd/SimdEnable.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdExp.h b/3rdparty/simdlib/Simd/SimdExp.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdExtract.h b/3rdparty/simdlib/Simd/SimdExtract.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdFrame.hpp b/3rdparty/simdlib/Simd/SimdFrame.hpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdInit.h b/3rdparty/simdlib/Simd/SimdInit.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdLib.cpp b/3rdparty/simdlib/Simd/SimdLib.cpp old mode 100755 new mode 
100644 diff --git a/3rdparty/simdlib/Simd/SimdLib.h b/3rdparty/simdlib/Simd/SimdLib.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdLib.hpp b/3rdparty/simdlib/Simd/SimdLib.hpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdLoad.h b/3rdparty/simdlib/Simd/SimdLoad.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdLoadBlock.h b/3rdparty/simdlib/Simd/SimdLoadBlock.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdLog.h b/3rdparty/simdlib/Simd/SimdLog.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdMath.h b/3rdparty/simdlib/Simd/SimdMath.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdMemory.h b/3rdparty/simdlib/Simd/SimdMemory.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeon.h b/3rdparty/simdlib/Simd/SimdNeon.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeonBgrToBgra.cpp b/3rdparty/simdlib/Simd/SimdNeonBgrToBgra.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeonBgrToGray.cpp b/3rdparty/simdlib/Simd/SimdNeonBgrToGray.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeonBgrToRgb.cpp b/3rdparty/simdlib/Simd/SimdNeonBgrToRgb.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeonBgraToBgr.cpp b/3rdparty/simdlib/Simd/SimdNeonBgraToBgr.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeonBgraToGray.cpp b/3rdparty/simdlib/Simd/SimdNeonBgraToGray.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeonDeinterleave.cpp b/3rdparty/simdlib/Simd/SimdNeonDeinterleave.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeonGaussianBlur.cpp b/3rdparty/simdlib/Simd/SimdNeonGaussianBlur.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdNeonResizer.cpp b/3rdparty/simdlib/Simd/SimdNeonResizer.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdPixel.hpp b/3rdparty/simdlib/Simd/SimdPixel.hpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdPow.h b/3rdparty/simdlib/Simd/SimdPow.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdResizer.h b/3rdparty/simdlib/Simd/SimdResizer.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdResizerCommon.h b/3rdparty/simdlib/Simd/SimdResizerCommon.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdRuntime.h b/3rdparty/simdlib/Simd/SimdRuntime.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSet.h b/3rdparty/simdlib/Simd/SimdSet.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41.h b/3rdparty/simdlib/Simd/SimdSse41.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41BgrToBgra.cpp b/3rdparty/simdlib/Simd/SimdSse41BgrToBgra.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41BgrToGray.cpp b/3rdparty/simdlib/Simd/SimdSse41BgrToGray.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41BgrToRgb.cpp b/3rdparty/simdlib/Simd/SimdSse41BgrToRgb.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41BgraToBgr.cpp b/3rdparty/simdlib/Simd/SimdSse41BgraToBgr.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41Cpu.cpp b/3rdparty/simdlib/Simd/SimdSse41Cpu.cpp old mode 100755 new mode 100644 diff --git 
a/3rdparty/simdlib/Simd/SimdSse41GaussianBlur.cpp b/3rdparty/simdlib/Simd/SimdSse41GaussianBlur.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41GrayToBgr.cpp b/3rdparty/simdlib/Simd/SimdSse41GrayToBgr.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41Reduce.cpp b/3rdparty/simdlib/Simd/SimdSse41Reduce.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41ReduceGray2x2.cpp b/3rdparty/simdlib/Simd/SimdSse41ReduceGray2x2.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41ReduceGray4x4.cpp b/3rdparty/simdlib/Simd/SimdSse41ReduceGray4x4.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41ResizeBilinear.cpp b/3rdparty/simdlib/Simd/SimdSse41ResizeBilinear.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdSse41Resizer.cpp b/3rdparty/simdlib/Simd/SimdSse41Resizer.cpp old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdStore.h b/3rdparty/simdlib/Simd/SimdStore.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdStream.h b/3rdparty/simdlib/Simd/SimdStream.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdUpdate.h b/3rdparty/simdlib/Simd/SimdUpdate.h old mode 100755 new mode 100644 diff --git a/3rdparty/simdlib/Simd/SimdView.hpp b/3rdparty/simdlib/Simd/SimdView.hpp old mode 100755 new mode 100644 diff --git a/CMakeLists.txt b/CMakeLists.txt index 18f9b81221..b94225d85d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -112,6 +112,9 @@ endif() if(POLICY CMP0075) cmake_policy(SET CMP0075 NEW) # For check_include_file and cmake 3.12.0 endif() +if(POLICY CMP0146) + cmake_policy(SET CMP0146 OLD) # The ``FindCUDA`` module deprecated since CMake 3.10 +endif() if(APPLE) # Fix following errors for libpng and libjpeg detection: @@ -127,11 +130,7 @@ project(VISP C CXX) list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake") include(cmake/VISPUtils.cmake) -include(cmake/VISPDetectCXXStandard.cmake) # Set cxx standard to 11 by default - -if (CMAKE_VERSION VERSION_LESS 3.0.0) - vp_clear_vars(VISPModules_TARGETS) -endif() +include(cmake/VISPDetectCXXStandard.cmake) # Set cxx standard to 17 by default #----------------------------------------------------------------------------- # VISP version number. An even minor number corresponds to releases. @@ -146,8 +145,7 @@ set(VISP_REVISION "1") #----------------------------------------------------------------------------- # TO BE CHECKED BEFORE NEXT RELEASE # -# see here: https://github.com/PointCloudLibrary/pcl/issues/3680 -# when this is fixed, we can remove the following 3 lines. 
+# Remove the following 3 lines and check if pcl produces a CMP0144 complaint around CMake variable FLANN_ROOT set to /opt/homebrew if(NOT DEFINED CMAKE_SUPPRESS_DEVELOPER_WARNINGS) set(CMAKE_SUPPRESS_DEVELOPER_WARNINGS 1 CACHE INTERNAL "No dev warnings") endif() @@ -269,39 +267,93 @@ endif() # --- Python Support --- if(NOT IOS) - include(cmake/VISPDetectPython.cmake) + # Make sure to refresh the python interpreter every time we rerun cmake + # If we don't do this, we may use an old or invalid python when installing the bindings + # that was cached by a previous attempt at building + if(CMAKE_VERSION VERSION_LESS "3.15.0") + set(PYTHON3_CACHE_LIST + PYTHON3INTERP_FOUND PYTHONINTERP_FOUND PYTHONLIBS_FOUND PYTHON_FOUND + PYTHON3_EXECUTABLE PYTHON_EXECUTABLE + ) + foreach (_variableName ${PYTHON3_CACHE_LIST}) + unset(${_variableName} CACHE) + endforeach() + include(cmake/VISPDetectPython.cmake) + else() + set(PYTHON3_CACHE_LIST + Python3_FOUND Python3_EXECUTABLE Python3_Interpreter_FOUND Python3_LIBRARIES + _Python3_EXECUTABLE _Python3_INCLUDE_DIR _Python3_INTERPRETER_PROPERTIES _Python3_LIBRARY_RELEASE + ) + foreach (_variableName ${PYTHON3_CACHE_LIST}) + unset(${_variableName} CACHE) + endforeach() + # Find strategy + set(Python3_FIND_REGISTRY LAST) + set(Python3_FIND_VIRTUALENV FIRST) + set(Python3_FIND_STRATEGY LOCATION) + find_package (Python3 COMPONENTS Interpreter Development) + + # Alias variables to be consistent with previous detection method + set(PYTHON3_FOUND ${Python3_FOUND}) + set(PYTHON3_EXECUTABLE ${Python3_EXECUTABLE}) + set(PYTHON_DEFAULT_EXECUTABLE ${PYTHON3_EXECUTABLE}) + set(PYTHON3INTERP_FOUND ${Python3_Interpreter_FOUND}) + set(PYTHON3_VERSION_STRING ${Python3_VERSION}) + endif() endif() -if(CMAKE_VERSION VERSION_LESS "3.19.0") +# --- Python Bindings requirements --- +VP_OPTION(USE_PYBIND11 pybind11 QUIET "Include pybind11 to create Python bindings" "" ON) + +# Minimum tool versions +set(CMAKE_MINIMUM_VERSION_PYTHON_BINDINGS "3.19.0") +set(PYTHON3_MINIMUM_VERSION_PYTHON_BINDINGS "3.7.0") +if(CMAKE_VERSION VERSION_LESS ${CMAKE_MINIMUM_VERSION_PYTHON_BINDINGS}) set(CMAKE_NOT_OK_FOR_BINDINGS TRUE) + message(STATUS "Required CMake version for Python bindings is ${CMAKE_MINIMUM_VERSION_PYTHON_BINDINGS}, + but you have ${CMAKE_VERSION}. + Python bindings generation will be deactivated. + ") else() set(CMAKE_NOT_OK_FOR_BINDINGS FALSE) endif() -if(CMAKE_NOT_OK_FOR_BINDINGS) - status("${CMAKE_NOT_OK_FOR_BINDINGS}") - status("CMake version required for Python bindings is 3.19.0, but you have ${CMAKE_VERSION}. Python bindings generation will be deactivated") +if(PYTHON3_VERSION_STRING VERSION_LESS ${PYTHON3_MINIMUM_VERSION_PYTHON_BINDINGS}) + set(PYTHON3_NOT_OK_FOR_BINDINGS TRUE) + message(STATUS "Required Python version for Python bindings is ${PYTHON3_MINIMUM_VERSION_PYTHON_BINDINGS}, + but you have ${PYTHON3_VERSION_STRING}. + Python bindings generation will be deactivated. 
+ ") +else() + set(PYTHON3_NOT_OK_FOR_BINDINGS FALSE) +endif() +if(VISP_CXX_STANDARD LESS VISP_CXX_STANDARD_17) + set(CXX_STANDARD_NOT_OK_FOR_BINDINGS TRUE) + message(STATUS "Required C++ standard is C++17, but you have ${VISP_CXX_STANDARD}") +else() + set(CXX_STANDARD_NOT_OK_FOR_BINDINGS FALSE) endif() -# --- Python Bindings requirements --- -# this avoids non-active conda from getting picked anyway on Windows -#set(Python_FIND_REGISTRY LAST) -# Use environment variable PATH to decide preference for Python -#set(Python_FIND_VIRTUALENV FIRST) -#set(Python_FIND_STRATEGY LOCATION) - -#find_package(Python 3.7 COMPONENTS Interpreter Development) # TODO: use visp function to find python? -#if(Python_FOUND) -# set(VISP_PYTHON_BINDINGS_EXECUTABLE "${Python_EXECUTABLE}") -#endif() -#find_package(pybind11) -VP_OPTION(USE_PYBIND11 pybind11 QUIET "Include pybind11 to create Python bindings" "" ON) -#if(pybind11_FOUND) -# set(VISP_PYBIND11_DIR "${pybind11_DIR}") -#endif() -#message("${pybind11_FOUND}") +# Forbid system Python +if(DEFINED ENV{VIRTUAL_ENV} OR DEFINED ENV{CONDA_PREFIX}) + set(_pip_args) + set(VISP_PYTHON_IS_SYSTEM_WIDE FALSE) +else() + # First solution: raise an error when cmake will call pip install + # set(_pip_args "--require-virtualenv") # If this is a system python, throw an error + if(PYTHON3_FOUND) + message(STATUS "The python version that you are using (${PYTHON3_EXECUTABLE}) is the system interpreter. + pip packages should not be installed system-wide! + Python bindings targets will be deactivated! + To reenable them, install conda or virtualenv, + delete the CMakeCache file then rerun cmake when inside the virtual environment. + ") + set(VISP_PYTHON_IS_SYSTEM_WIDE TRUE) + endif() +endif() + # --- @@ -447,7 +499,7 @@ VP_OPTION(BUILD_ANDROID_EXAMPLES "" "" "Build examples for Android platform" VP_OPTION(INSTALL_ANDROID_EXAMPLES "" "" "Install Android examples" "" OFF IF ANDROID ) # Build python bindings as an option -VP_OPTION(BUILD_PYTHON_BINDINGS "" "" "Build Python bindings" "" ON IF (PYTHON3INTERP_FOUND AND USE_PYBIND11 AND NOT CMAKE_NOT_OK_FOR_BINDINGS) ) +VP_OPTION(BUILD_PYTHON_BINDINGS "" "" "Build Python bindings" "" ON IF (PYTHON3INTERP_FOUND AND USE_PYBIND11 AND NOT CMAKE_NOT_OK_FOR_BINDINGS AND NOT VISP_PYTHON_IS_SYSTEM_WIDE AND NOT PYTHON3_NOT_OK_FOR_BINDINGS AND NOT CXX_STANDARD_NOT_OK_FOR_BINDINGS) ) VP_OPTION(BUILD_PYTHON_BINDINGS_DOC "" "" "Build the documentation for the Python bindings" "" ON IF BUILD_PYTHON_BINDINGS ) @@ -586,17 +638,8 @@ if(SOQT_FOUND) # SoQt < 1.6.0 that depends on Qt4 was found. We need an explicit VP_OPTION(USE_QT Qt "" "Include Coin/SoQt/Qt support" "" ON IF USE_SOQT AND NOT WINRT AND NOT IOS) endif() VP_OPTION(USE_SOXT SOXT "" "Include Coin/SoXt support" "" OFF IF USE_COIN3D AND NOT WINRT AND NOT IOS) -set(THREADS_PREFER_PTHREAD_FLAG ON) -VP_OPTION(USE_THREADS Threads "" "Include std::thread support" "" ON IF NOT (WIN32 OR MINGW)) - -# We need threads. To be changed to make threads optional -if(NOT USE_THREADS) - if(Threads_FOUND) - message(WARNING "We need std::thread. 
We turn USE_THREADS=ON.") - unset(USE_THREADS) - set(USE_THREADS ON CACHE BOOL "Include std::thread support" FORCE) - endif() -endif() +set(THREADS_PREFER_PTHREAD_FLAG TRUE) +VP_OPTION(USE_THREADS Threads "" "Include std::thread support" "" ON) VP_OPTION(USE_XML2 XML2 "" "Include libxml2 support" "" ON IF NOT WINRT) if(CMAKE_TOOLCHAIN_FILE) @@ -613,7 +656,7 @@ VP_OPTION(USE_X11 X11 "" "Include X11 support" "${X1 VP_OPTION(USE_GTK2 MyGTK2 "" "Include gtk2 support" "" OFF IF NOT WINRT AND NOT IOS) VP_OPTION(USE_JPEG "JPEG;MyJPEG" "" "Include jpeg support" "" ON IF NOT IOS) VP_OPTION(USE_PNG "PNG;MyPNG" "" "Include png support" "" ON IF NOT IOS) -# To control Pioneer mobile robots, under UNIX we need Aria, pthread, rt and dl 3rd party libraries +# To control Pioneer mobile robots, under UNIX we need Aria and std::threads, rt and dl 3rd party libraries VP_OPTION(USE_ARIA ARIA "" "Include aria support" "" ON IF NOT WINRT AND NOT IOS) #VP_OPTION(USE_RT RT "" "Include rt support" "" ON) #VP_OPTION(USE_DL DL "" "Include dl support" "" ON) @@ -624,7 +667,6 @@ VP_OPTION(USE_PCL PCL QUIET "Include Point Cloud Library suppor VP_OPTION(USE_TENSORRT TensorRT "" "Include TensorRT support" "" ON IF NOT WINRT AND NOT IOS) VP_OPTION(USE_NLOHMANN_JSON nlohmann_json QUIET "Include nlohmann json support" "" ON) -# Upgrade c++ standard to 14 for pcl 1.9.1.99 that enables by default c++ 14 standard if(USE_PCL) # PCL is used in modules gui, sensor and mbt. # In these modules we cannot directly use PCL_INCLUDE_DIRS and PCL_LIBRARIES using: @@ -638,6 +680,23 @@ if(USE_PCL) vp_find_pcl(PCL_LIBRARIES PCL_DEPS_INCLUDE_DIRS PCL_DEPS_LIBRARIES) endif() +# ---------------------------------------------------------------------------- +# Handle OpenCV 2.4.8 as minimal version +# ---------------------------------------------------------------------------- +if(USE_OPENCV) + if(OpenCV_VERSION) + if(OpenCV_VERSION VERSION_LESS "2.4.8") + message(WARNING "OpenCV 3rd party was detected but its version ${OpenCV_VERSION} is too old. Thus we disable OpenCV usage turning USE_OPENCV=OFF.") + unset(USE_OPENCV) + set(USE_OPENCV OFF CACHE BOOL "Include OpenCV support" FORCE) + endif() + else() + message(WARNING "OpenCV 3rd party was detected but its version cannot be found or is too old. Thus we disable OpenCV usage turning USE_OPENCV=OFF.") + unset(USE_OPENCV) + set(USE_OPENCV OFF CACHE BOOL "Include OpenCV support" FORCE) + endif() +endif() + # ---------------------------------------------------------------------------- # Handle cxx standard depending on specific 3rd parties. Should be before module parsing and VISP3rdParty.cmake include # ---------------------------------------------------------------------------- @@ -652,7 +711,7 @@ endif() if(VISP_CXX_STANDARD LESS VISP_CXX_STANDARD_14) if(USE_FTIITSDK) - message(WARNING "IIT force-torque SDK 3rd party was detected and needs at least c++14 standard compiler flag while you have set c++${USE_CXX_STANDARD}. Thus we disable IIT force-torque usage turning USE_OPENCV=OFF.") + message(WARNING "IIT force-torque SDK 3rd party was detected and needs at least c++14 standard compiler flag while you have set c++${USE_CXX_STANDARD}. 
Thus we disable IIT force-torque usage turning USE_FTIITSDK=OFF.") unset(USE_FTIITSDK) set(USE_FTIITSDK OFF CACHE BOOL "Include IIT force-torque SDK support" FORCE) endif() @@ -700,14 +759,24 @@ if(VISP_CXX_STANDARD LESS VISP_CXX_STANDARD_11) unset(USE_ARSDK) set(USE_ARSDK OFF CACHE BOOL "Include Parrot ARSDK support" FORCE) endif() + if(USE_THREADS) + message(WARNING "std::thread was detected but needs at least c++11 standard compiler flag while you have set c++${USE_CXX_STANDARD}. Thus we disable std::thread usage turning USE_THREADS=OFF.") + unset(USE_THREADS) + set(USE_THREADS OFF CACHE BOOL "Include std::thread support" FORCE) + endif() +endif() + +if(UNIX AND Threads_FOUND) + # Apriltag on unix needs native pthread. On windows we are using pthread built-in + set(USE_PTHREAD ON) # for AprilTag only endif() # ---------------------------------------------------------------------------- # Build-in 3rd parties. Should be after c++ standard potential modification # ---------------------------------------------------------------------------- -VP_OPTION(WITH_PTHREAD "" "" "Build pthread as built-in library" "" ON IF (NOT USE_THREADS) AND (WIN32 OR MINGW) AND (NOT WINRT)) +VP_OPTION(WITH_PTHREAD "" "" "Build pthread as built-in library" "" ON IF (WIN32 OR MINGW) AND (NOT WINRT)) # Since C99 is not supported by MSVC 2010 or prior, we disable apriltag if MSVC < 2012 -VP_OPTION(WITH_APRILTAG "" "" "Build AprilTag as built-in library" "" ON IF (USE_THREADS OR WITH_PTHREAD) AND (NOT WINRT) AND (NOT MSVC_VERSION LESS 1700)) +VP_OPTION(WITH_APRILTAG "" "" "Build AprilTag as built-in library" "" ON IF (USE_THREADS OR USE_PTHREAD OR WITH_PTHREAD) AND (NOT WINRT) AND (NOT MSVC_VERSION LESS 1700)) VP_OPTION(WITH_APRILTAG_BIG_FAMILY "" "" "Build AprilTag big family (41h12, 48h12, 49h12, 52h13)" "" OFF IF WITH_APRILTAG) VP_OPTION(WITH_ATIDAQ "" "" "Build atidaq-c as built-in library" "" ON IF USE_COMEDI AND NOT WINRT) VP_OPTION(WITH_CLIPPER "" "" "Build clipper as built-in library" "" ON IF USE_OPENCV) @@ -996,7 +1065,10 @@ VP_SET(VISP_HAVE_LAPACK_GSL TRUE IF (BUILD_MODULE_visp_core AND USE_GSL)) VP_SET(VISP_HAVE_LAPACK_MKL TRUE IF (BUILD_MODULE_visp_core AND USE_MKL)) VP_SET(VISP_HAVE_LAPACK_NETLIB TRUE IF (BUILD_MODULE_visp_core AND USE_NETLIB)) VP_SET(VISP_HAVE_LAPACK_OPENBLAS TRUE IF (BUILD_MODULE_visp_core AND USE_OPENBLAS)) -VP_SET(VISP_HAVE_PTHREAD TRUE IF (BUILD_MODULE_visp_core AND USE_THREADS)) # Keep for the momment for compat + +# Keep VISP_HAVE_PTHREAD for the moment for compat and for vpMutex and vpThread deprecated classes +VP_SET(VISP_HAVE_PTHREAD TRUE IF (BUILD_MODULE_visp_core AND (USE_PTHREAD AND UNIX))) + VP_SET(VISP_HAVE_THREADS TRUE IF (BUILD_MODULE_visp_core AND USE_THREADS)) VP_SET(VISP_HAVE_XML2 TRUE IF (BUILD_MODULE_visp_core AND USE_XML2)) VP_SET(VISP_HAVE_PCL TRUE IF (BUILD_MODULE_visp_core AND USE_PCL)) @@ -1078,8 +1150,6 @@ VP_SET(VISP_HAVE_VICON TRUE IF (BUILD_MODULE_visp_sensor AND USE_VICON)) VP_SET(VISP_BUILD_SHARED_LIBS TRUE IF BUILD_SHARED_LIBS) # for header vpConfig.h VP_SET(VISP_HAVE_DC1394_CAMERA_ENUMERATE TRUE IF (USE_DC1394 AND DC1394_CAMERA_ENUMERATE_FOUND)) # for header vpConfig.h VP_SET(VISP_HAVE_DC1394_FIND_CAMERAS TRUE IF (USE_DC1394 AND DC1394_FIND_CAMERAS_FOUND)) # for header vpConfig.h -VP_SET(VISP_HAVE_D3D9 TRUE IF USE_DIRECT3D) # for header vpConfig.h -VP_SET(VISP_HAVE_GTK TRUE IF USE_GTK2) # for header vpConfig.h VP_SET(VISP_HAVE_XRANDR TRUE IF XRANDR) # for header vpConfig.h VP_SET(VISP_HAVE_NULLPTR TRUE IF HAVE_NULLPTR) # for header vpConfig.h @@ 
-1603,8 +1673,17 @@ if(BUILD_PYTHON_BINDINGS) status(" Package version:" "${VISP_PYTHON_PACKAGE_VERSION}") status(" Wrapped modules:" "${VISP_PYTHON_BOUND_MODULES}") status(" Generated input config:" "${VISP_PYTHON_GENERATED_CONFIG_FILE}") +else() + status(" Requirements: ") + status(" Python version > ${PYTHON3_MINIMUM_VERSION_PYTHON_BINDINGS}:" PYTHON3_FOUND AND NOT PYTHON3_NOT_OK_FOR_BINDINGS THEN "ok (ver ${PYTHON3_VERSION_STRING})" ELSE "python not found or too old (${PYTHON3_VERSION_STRING})") + status(" Python in Virtual environment or conda:" VISP_PYTHON_IS_SYSTEM_WIDE THEN "failed" ELSE "ok") + status(" Pybind11 found:" USE_PYBIND11 THEN "ok" ELSE "failed") + status(" CMake > ${CMAKE_MINIMUM_VERSION_PYTHON_BINDINGS}:" CMAKE_NOT_OK_FOR_BINDINGS THEN "failed (${CMAKE_VERSION})" ELSE "ok (${CMAKE_VERSION})") + status(" C++ standard > ${VISP_CXX_STANDARD_17}:" CXX_STANDARD_NOT_OK_FOR_BINDINGS THEN "failed (${VISP_CXX_STANDARD})" ELSE "ok (${VISP_CXX_STANDARD})") + endif() + # ============================ Options =========================== status("") status(" Build options: ") @@ -1730,6 +1809,7 @@ status("") status(" Optimization: ") status(" Use OpenMP:" USE_OPENMP THEN "yes" ELSE "no") status(" Use std::thread:" USE_THREADS THEN "yes" ELSE "no") +status(" Use pthread:" USE_PTHREAD THEN "yes" ELSE "no") status(" Use pthread (built-in):" WITH_PTHREAD THEN "yes (ver ${PTHREADS_VERSION})" ELSE "no") status(" Use simdlib (built-in):" WITH_SIMDLIB THEN "yes" ELSE "no") status("") diff --git a/CTestConfig.cmake b/CTestConfig.cmake index 32fb240079..f8d8f03839 100644 --- a/CTestConfig.cmake +++ b/CTestConfig.cmake @@ -91,52 +91,42 @@ else() endif() # Find out the version of gcc being used. -if(CMAKE_COMPILER_IS_GNUCC) - exec_program(${CMAKE_CXX_COMPILER} - ARGS -dumpversion - OUTPUT_VARIABLE COMPILER_VERSION - ) - #message("COMPILER_VERSION 1: ${COMPILER_VERSION}") - string(REGEX REPLACE ".* ([0-9])\\.([0-9])\\.[0-9].*" "\\1\\2" - COMPILER_VERSION ${COMPILER_VERSION}) - #message("COMPILER_VERSION 2: ${COMPILER_VERSION}") - - set(BUILDNAME "${BUILDNAME}${COMPILER_VERSION}") - -endif(CMAKE_COMPILER_IS_GNUCC) +if(CMAKE_CXX_COMPILER_VERSION) + set(BUILDNAME "${BUILDNAME}-${CMAKE_CXX_COMPILER_VERSION}") +endif() # Add the type of library generation, e.g. "Dynamic or Static" if(BUILD_SHARED_LIBS) set(BUILDNAME "${BUILDNAME}-Dyn") else(BUILD_SHARED_LIBS) set(BUILDNAME "${BUILDNAME}-Sta") -endif(BUILD_SHARED_LIBS) +endif() # Add the build type, e.g. "Debug, Release..." 
if(CMAKE_BUILD_TYPE) set(BUILDNAME "${BUILDNAME}-${CMAKE_BUILD_TYPE}") -endif(CMAKE_BUILD_TYPE) +endif() #---- Robots ---- # Add specific Afma4 robots if(VISP_HAVE_AFMA4) set(BUILDNAME "${BUILDNAME}-Afma4") -endif(VISP_HAVE_AFMA4) +endif() # Add specific Afma6 robots if(VISP_HAVE_AFMA6) set(BUILDNAME "${BUILDNAME}-Afma6") -endif(VISP_HAVE_AFMA6) +endif() # Add specific Ptu46 robots if(VISP_HAVE_PTU46) set(BUILDNAME "${BUILDNAME}-Ptu46") -endif(VISP_HAVE_PTU46) +endif() # Add specific Biclops robots if(VISP_HAVE_BICLOPS) set(BUILDNAME "${BUILDNAME}-Biclops") -endif(VISP_HAVE_BICLOPS) +endif() # Add specific Pioneer robots if(VISP_HAVE_PIONEER) @@ -152,18 +142,18 @@ endif() # Firewire dc1394-2.x if(VISP_HAVE_DC1394) set(BUILDNAME "${BUILDNAME}-dc1394") -endif(VISP_HAVE_DC1394) +endif() # Video 4 linux 2 (V4L2) if(VISP_HAVE_V4L2) set(BUILDNAME "${BUILDNAME}-v4l2") -endif(VISP_HAVE_V4L2) +endif() # Directshow if(VISP_HAVE_DIRECTSHOW) set(BUILDNAME "${BUILDNAME}-dshow") -endif(VISP_HAVE_DIRECTSHOW) +endif() if(VISP_HAVE_CMU1394) set(BUILDNAME "${BUILDNAME}-CMU1394") -endif(VISP_HAVE_CMU1394) +endif() if(VISP_HAVE_LIBFREENECT) set(BUILDNAME "${BUILDNAME}-freenect") endif() @@ -196,19 +186,19 @@ endif() # X11 if(VISP_HAVE_X11) set(BUILDNAME "${BUILDNAME}-X11") -endif(VISP_HAVE_X11) +endif() # GTK if(VISP_HAVE_GTK) set(BUILDNAME "${BUILDNAME}-gtk") -endif(VISP_HAVE_GTK) +endif() # GDI (Windows Graphics Device Interface) if(VISP_HAVE_GDI) set(BUILDNAME "${BUILDNAME}-gdi") -endif(VISP_HAVE_GDI) +endif() # D3D (Direct3D9) if(VISP_HAVE_D3D9) set(BUILDNAME "${BUILDNAME}-Direct3D") -endif(VISP_HAVE_D3D9) +endif() # OpenCV if(VISP_HAVE_OPENCV) if(OpenCV_VERSION) @@ -220,7 +210,7 @@ if(VISP_HAVE_OPENCV) else() set(BUILDNAME "${BUILDNAME}-OpenCV") endif() -endif(VISP_HAVE_OPENCV) +endif() #---- Mathematics ---- # Lapack (Linear Algebra PACKage) @@ -246,34 +236,31 @@ endif() # Coin if(VISP_HAVE_COIN3D) set(BUILDNAME "${BUILDNAME}-Coin") -endif(VISP_HAVE_COIN3D) +endif() # SoQt if(VISP_HAVE_SOQT) set(BUILDNAME "${BUILDNAME}-SoQt") -endif(VISP_HAVE_SOQT) +endif() # Qt if(VISP_HAVE_QT) set(BUILDNAME "${BUILDNAME}-Qt${DESIRED_QT_VERSION}") -endif(VISP_HAVE_QT) +endif() # SoWin if(VISP_HAVE_SOWIN) set(BUILDNAME "${BUILDNAME}-SoWin") -endif(VISP_HAVE_SOWIN) +endif() # SoXt if(VISP_HAVE_SOXT) set(BUILDNAME "${BUILDNAME}-SoXt") -endif(VISP_HAVE_SOXT) +endif() #---- Images ---- if(VISP_HAVE_JPEG) set(BUILDNAME "${BUILDNAME}-jpeg") -endif(VISP_HAVE_JPEG) +endif() if(VISP_HAVE_PNG) set(BUILDNAME "${BUILDNAME}-png") -endif(VISP_HAVE_PNG) -#if(VISP_HAVE_ZLIB) -# set(BUILDNAME "${BUILDNAME}-zlib") -#endif() +endif() #---- Misc ---- # XML @@ -306,7 +293,9 @@ endif() if(ACTIVATE_WARNING_FLOAT_EQUAL) set(BUILDNAME "${BUILDNAME}-Weq") endif() -if(VISP_CXX_STANDARD EQUAL VISP_CXX_STANDARD_11) +if(VISP_CXX_STANDARD EQUAL VISP_CXX_STANDARD_98) + set(BUILDNAME "${BUILDNAME}-c98") +elseif(VISP_CXX_STANDARD EQUAL VISP_CXX_STANDARD_11) set(BUILDNAME "${BUILDNAME}-c11") elseif(VISP_CXX_STANDARD EQUAL VISP_CXX_STANDARD_14) set(BUILDNAME "${BUILDNAME}-c14") diff --git a/ChangeLog.txt b/ChangeLog.txt index d8a85ed998..c7aea44e05 100644 --- a/ChangeLog.txt +++ b/ChangeLog.txt @@ -13,15 +13,24 @@ ViSP 3.x.x (Version in development) . vpPololu and vpRobotPololuPtu to control respectively a servo motor using a pololu maestro board or a 3D printed 2 dof pan-tilt unit. Visual servoing example provided in example/servo-pololu-ptu. Tests available in modules/robot/test/servo-pololu/ + . 
vpStatisticalTestAbstract, vpStatisticalTestEWMA, vpStatisticalTestHinkley, vpStatisticalTestMeanAdjustedCUSUM, + vpStatisticalTestShewhart and vpStatisticalTestSigma: classes implementing Statistical Process Control methods to + detect mean drift / jump of a signal - Deprecated . vpPlanarObjectDetector, vpFernClassifier deprecated classes are removed . End of supporting c++98 standard. As a consequence, ViSP is no more compatible with Ubuntu 12.04 . vpDisplay::displayCharString() is marked deprecated. Use vpDisplay::displayText() instead . vpHinkley class is deprecated, in favor of vpStatisticalTestHinkley - New features and improvements + . Introduce Python bindings for most of ViSP modules and classes (see corresponding tutorial) + . Updated Dockerfile in ci/docker folder for Ubuntu 18.04, 20.04 and 22.04. Corresponding images are also available + ready to use on DockerHub https://hub.docker.com/repository/docker/vispci/vispci/general + . OpenCV 2.4.8 is the minimal supported version . Introduce applications in apps folder, a collection of useful tools that have a dependency to the install target - . Bump minimal c++ standard to c++11 . Speed up build by including only opencv2/opencv_modules.hpp instead of opencv2/opencv.hpp header in vpConfig.h + . In imgproc module, implementation of automatic gamma factor computation methods for gamma correction. + . Eliminate the use of pthread in favour of std::thread - Applications . Migrate eye-to-hand tutorials in apps - Tutorials @@ -30,6 +39,10 @@ ViSP 3.x.x (Version in development) https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-tracking-mb-generic-rgbd-Blender.html . New tutorial: Installation from prebuilt Conda packages for Linux / OSX / Windows https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-install-conda-package.html + . New tutorial: Using Statistical Process Control to monitor your signal + https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-spc.html + . New tutorial: Installing ViSP Python bindings + https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-install-python-bindings.html - Bug fixed . [#1251] Bug in vpDisplay::displayFrame() . [#1270] Build issue around std::clamp and optional header which are not found with cxx17 @@ -40,6 +53,8 @@ ViSP 3.x.x (Version in development) . [#1279] Issue in vpPoseVector json serialization . [#1296] Unable to parse camera parameters with vpXmlParserCamera::parse() when camera name is empty . [#1307] Cannot set cxx standard when configuring for Visual Studio + . [#1320] Broken links in the documentation + . 
[#1341] SVD computation fails with Lapack when m < n ---------------------------------------------- ViSP 3.6.0 (released September 22, 2023) - Contributors: diff --git a/apps/calibration/hand_eye_calibration_show_extrinsics.py b/apps/calibration/hand_eye_calibration_show_extrinsics.py old mode 100755 new mode 100644 diff --git a/ci/docker/ubuntu-18.04/Dockerfile b/ci/docker/ubuntu-18.04/Dockerfile index 4ae1ff3230..0e8c23a443 100644 --- a/ci/docker/ubuntu-18.04/Dockerfile +++ b/ci/docker/ubuntu-18.04/Dockerfile @@ -2,16 +2,21 @@ FROM ubuntu:18.04 MAINTAINER Fabien Spindler ARG DEBIAN_FRONTEND=noninteractive +ARG VISPCI_USER_UID=1001 +ARG DOCKER_GROUP_GID=130 ENV TZ=Europe/Paris # Update aptitude with new repo -RUN apt-get update - +RUN apt-get update + # Install packages RUN apt-get install -y \ + sudo \ build-essential \ cmake \ git \ + net-tools \ + iputils-ping \ # Recommended ViSP 3rd parties libopencv-dev \ libx11-dev \ @@ -31,6 +36,16 @@ RUN apt-get install -y \ libdmtx-dev \ libgsl-dev +RUN adduser --disabled-password --gecos "" --uid $VISPCI_USER_UID vispci \ + && groupadd docker --gid $DOCKER_GROUP_GID \ + && usermod -aG sudo vispci \ + && usermod -aG docker vispci \ + && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers \ + && echo "Defaults env_keep += \"DEBIAN_FRONTEND\"" >> /etc/sudoers \ + && adduser vispci video + +ENV HOME=/home/vispci + # Install visp-images RUN mkdir -p ${HOME}/visp-ws \ && cd ${HOME}/visp-ws \ @@ -47,6 +62,8 @@ RUN cd ${HOME}/visp-ws \ && mkdir visp-build \ && cd visp-build \ && cmake ../visp \ - && make + && make -j4 + +USER vispci -WORKDIR / +WORKDIR /home/vispci diff --git a/ci/docker/ubuntu-20.04/Dockerfile b/ci/docker/ubuntu-20.04/Dockerfile index 34105dc365..d9de38da6f 100644 --- a/ci/docker/ubuntu-20.04/Dockerfile +++ b/ci/docker/ubuntu-20.04/Dockerfile @@ -2,16 +2,21 @@ FROM ubuntu:20.04 MAINTAINER Fabien Spindler ARG DEBIAN_FRONTEND=noninteractive +ARG VISPCI_USER_UID=1001 +ARG DOCKER_GROUP_GID=130 ENV TZ=Europe/Paris # Update aptitude with new repo -RUN apt-get update - +RUN apt-get update + # Install packages RUN apt-get install -y \ + sudo \ build-essential \ cmake \ git \ + net-tools \ + iputils-ping \ # Recommended ViSP 3rd parties libopencv-dev \ libx11-dev \ @@ -31,6 +36,16 @@ RUN apt-get install -y \ libdmtx-dev \ libgsl-dev +RUN adduser --disabled-password --gecos "" --uid $VISPCI_USER_UID vispci \ + && groupadd docker --gid $DOCKER_GROUP_GID \ + && usermod -aG sudo vispci \ + && usermod -aG docker vispci \ + && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers \ + && echo "Defaults env_keep += \"DEBIAN_FRONTEND\"" >> /etc/sudoers \ + && adduser vispci video + +ENV HOME=/home/vispci + # Install visp-images RUN mkdir -p ${HOME}/visp-ws \ && cd ${HOME}/visp-ws \ @@ -47,7 +62,8 @@ RUN cd ${HOME}/visp-ws \ && mkdir visp-build \ && cd visp-build \ && cmake ../visp \ - && make + && make -j4 -WORKDIR / +USER vispci +WORKDIR /home/vispci diff --git a/ci/docker/ubuntu-22.04/Dockerfile b/ci/docker/ubuntu-22.04/Dockerfile new file mode 100644 index 0000000000..dfe5a9da72 --- /dev/null +++ b/ci/docker/ubuntu-22.04/Dockerfile @@ -0,0 +1,68 @@ +FROM ubuntu:22.04 +MAINTAINER Fabien Spindler + +ARG DEBIAN_FRONTEND=noninteractive +ARG VISPCI_USER_UID=1001 +ARG DOCKER_GROUP_GID=121 +ENV TZ=Europe/Paris + +# Update aptitude with new repo +RUN apt-get update + +# Install packages +RUN apt-get install -y \ + sudo \ + build-essential \ + cmake \ + git \ + net-tools \ + iputils-ping \ + # Recommended ViSP 3rd parties + libopencv-dev \ + libx11-dev \ 
+ liblapack-dev \ + libeigen3-dev \ + libdc1394-dev \ + libv4l-dev \ + libzbar-dev \ + # Other optional 3rd parties + libpcl-dev \ + libcoin-dev \ + libjpeg-turbo8-dev \ + libpng-dev \ + libogre-1.9-dev \ + libois-dev \ + libdmtx-dev \ + libgsl-dev + +RUN adduser --disabled-password --gecos "" --uid $VISPCI_USER_UID vispci \ + && groupadd docker --gid $DOCKER_GROUP_GID \ + && usermod -aG sudo vispci \ + && usermod -aG docker vispci \ + && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers \ + && echo "Defaults env_keep += \"DEBIAN_FRONTEND\"" >> /etc/sudoers \ + && adduser vispci video + +ENV HOME=/home/vispci + +# Install visp-images +RUN mkdir -p ${HOME}/visp-ws \ + && cd ${HOME}/visp-ws \ + && git clone https://github.com/lagadic/visp-images.git \ + && echo "export VISP_WS=${HOME}/visp-ws" >> ${HOME}/.bashrc \ + && echo "export VISP_INPUT_IMAGE_PATH=${HOME}/visp-ws/visp-images" >> ${HOME}/.bashrc + +# Get visp +RUN cd ${HOME}/visp-ws \ + && git clone https://github.com/lagadic/visp + +# Build visp +RUN cd ${HOME}/visp-ws \ + && mkdir visp-build \ + && cd visp-build \ + && cmake ../visp \ + && make -j$(nproc) + +USER vispci + +WORKDIR /home/vispci diff --git a/cmake/AddExtraCompilationFlags.cmake b/cmake/AddExtraCompilationFlags.cmake index 0cec5eef4e..7adb63f986 100644 --- a/cmake/AddExtraCompilationFlags.cmake +++ b/cmake/AddExtraCompilationFlags.cmake @@ -117,7 +117,8 @@ if(USE_OPENMP) add_extra_compiler_option("${OpenMP_CXX_FLAGS}") endif() -if(USE_THREADS) +if(USE_THREADS OR USE_PTHREAD) + # Condider the case of Apriltags on Unix that needs pthread if(THREADS_HAVE_PTHREAD_ARG) add_extra_compiler_option("-pthread") endif() diff --git a/cmake/FindDMTX.cmake b/cmake/FindDMTX.cmake index 86fcf28340..925d777122 100644 --- a/cmake/FindDMTX.cmake +++ b/cmake/FindDMTX.cmake @@ -68,4 +68,3 @@ mark_as_advanced( DMTX_INCLUDE_DIRS DMTX_LIBRARIES ) - diff --git a/cmake/FindGSL.cmake b/cmake/FindGSL.cmake index f933c2628e..16b9f5095d 100644 --- a/cmake/FindGSL.cmake +++ b/cmake/FindGSL.cmake @@ -150,7 +150,6 @@ else() get_filename_component(GSL_LIB_DIR ${GSL_gsl_LIBRARY} PATH) vp_get_version_from_pkg("gsl" "${GSL_LIB_DIR}/pkgconfig" GSL_VERSION) - else() set(GSL_FOUND FALSE) endif() @@ -184,4 +183,3 @@ mark_as_advanced( GSL_cblas_LIBRARY GSL_INCLUDE_DIR ) - diff --git a/cmake/FindLIBUSB_1.cmake b/cmake/FindLIBUSB_1.cmake index 4eaf8ae1ce..8b963fe651 100644 --- a/cmake/FindLIBUSB_1.cmake +++ b/cmake/FindLIBUSB_1.cmake @@ -89,7 +89,3 @@ mark_as_advanced( LIBUSB_1_LIBRARIES LIBUSB_1_LIBRARY ) - - - - diff --git a/cmake/FindMyGTK2.cmake b/cmake/FindMyGTK2.cmake index 5b95fb4698..0a5eff7bec 100644 --- a/cmake/FindMyGTK2.cmake +++ b/cmake/FindMyGTK2.cmake @@ -302,4 +302,3 @@ if(UNIX OR WIN32) else(UNIX OR WIN32) message("FindGTK2 is working on UNIX/LINUX and Windows, only!") endif() - diff --git a/cmake/FindNetlib.cmake b/cmake/FindNetlib.cmake index 9f835fb98d..2a2b1c7994 100644 --- a/cmake/FindNetlib.cmake +++ b/cmake/FindNetlib.cmake @@ -143,4 +143,3 @@ mark_as_advanced( NETLIB_LIBRARY_F2C_DEBUG NETLIB_LIBRARY_F2C_RELEASE ) - diff --git a/cmake/FindZBAR.cmake b/cmake/FindZBAR.cmake index 3ee22ce5c5..4a7696b3a5 100644 --- a/cmake/FindZBAR.cmake +++ b/cmake/FindZBAR.cmake @@ -132,4 +132,3 @@ mark_as_advanced( ZBAR_INCLUDE_DIRS ZBAR_LIBRARIES ) - diff --git a/cmake/VISPDetectCXXStandard.cmake b/cmake/VISPDetectCXXStandard.cmake index 950b1bee36..3bdce51e8f 100644 --- a/cmake/VISPDetectCXXStandard.cmake +++ b/cmake/VISPDetectCXXStandard.cmake @@ -157,15 +157,35 @@ else() if(USE_CXX_STANDARD STREQUAL 
"98") set(CMAKE_CXX_STANDARD 98) set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_98}) + set(CXX98_CXX_FLAGS "-std=c++98" CACHE STRING "C++ compiler flags for C++98 support") + mark_as_advanced(CXX98_CXX_FLAGS) elseif(USE_CXX_STANDARD STREQUAL "11") set(CMAKE_CXX_STANDARD 11) set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_11}) + vp_check_compiler_flag(CXX "-std=c++11" HAVE_STD_CXX11_FLAG "${PROJECT_SOURCE_DIR}/cmake/checks/cxx11.cpp") + if(HAVE_STD_CXX11_FLAG) + set(CXX11_CXX_FLAGS "-std=c++11" CACHE STRING "C++ compiler flags for C++11 support") + mark_as_advanced(CXX11_CXX_FLAGS) + endif() + mark_as_advanced(HAVE_STD_CXX11_FLAG) elseif(USE_CXX_STANDARD STREQUAL "14") set(CMAKE_CXX_STANDARD 14) set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_14}) + vp_check_compiler_flag(CXX "-std=c++14" HAVE_STD_CXX14_FLAG "${PROJECT_SOURCE_DIR}/cmake/checks/cxx14.cpp") + if(HAVE_STD_CXX14_FLAG) + set(CXX14_CXX_FLAGS "-std=c++14" CACHE STRING "C++ compiler flags for C++14 support") + mark_as_advanced(CXX14_CXX_FLAGS) + endif() + mark_as_advanced(HAVE_STD_CXX14_FLAG) elseif(USE_CXX_STANDARD STREQUAL "17") set(CMAKE_CXX_STANDARD 17) set(VISP_CXX_STANDARD ${VISP_CXX_STANDARD_17}) + vp_check_compiler_flag(CXX "-std=c++17" HAVE_STD_CXX17_FLAG "${PROJECT_SOURCE_DIR}/cmake/checks/cxx17.cpp") + if(HAVE_STD_CXX17_FLAG) + set(CXX17_CXX_FLAGS "-std=c++17" CACHE STRING "C++ compiler flags for C++17 support") + mark_as_advanced(CXX17_CXX_FLAGS) + endif() + mark_as_advanced(HAVE_STD_CXX17_FLAG) endif() endif() diff --git a/cmake/VISPDetectPython.cmake b/cmake/VISPDetectPython.cmake index 7601715c3c..f5ac45b910 100644 --- a/cmake/VISPDetectPython.cmake +++ b/cmake/VISPDetectPython.cmake @@ -209,11 +209,10 @@ if(NOT ${found}) message(STATUS " PYTHON3_NUMPY_INCLUDE_DIRS") else() # Attempt to discover the NumPy include directory. 
If this succeeds, then build python API with NumPy - execute_process(COMMAND "${_executable}" -c "import os; os.environ['DISTUTILS_USE_SDK']='1'; import numpy.distutils; print(os.pathsep.join(numpy.distutils.misc_util.get_numpy_include_dirs()))" + execute_process(COMMAND "${_executable}" -c "import numpy; print(numpy.get_include())" RESULT_VARIABLE _numpy_process OUTPUT_VARIABLE _numpy_include_dirs OUTPUT_STRIP_TRAILING_WHITESPACE) - if(NOT _numpy_process EQUAL 0) unset(_numpy_include_dirs) endif() diff --git a/cmake/VISPGenerateConfig.cmake b/cmake/VISPGenerateConfig.cmake index 60d0b5f922..01b548313f 100644 --- a/cmake/VISPGenerateConfig.cmake +++ b/cmake/VISPGenerateConfig.cmake @@ -99,11 +99,7 @@ endif() # ------------------------------------------------------------------------------------------- # Export the library -if (CMAKE_VERSION VERSION_LESS 3.0.0) - export(TARGETS ${VISPModules_TARGETS} FILE "${PROJECT_BINARY_DIR}/VISPModules.cmake") -else() - export(EXPORT VISPModules FILE "${PROJECT_BINARY_DIR}/VISPModules.cmake") -endif() +export(EXPORT VISPModules FILE "${PROJECT_BINARY_DIR}/VISPModules.cmake") ## Update include dirs set(VISP_INCLUDE_DIRS_CONFIGCMAKE "${VISP_INCLUDE_DIR}") diff --git a/cmake/VISPUtils.cmake b/cmake/VISPUtils.cmake index 1e37d00e4e..b6d9118098 100644 --- a/cmake/VISPUtils.cmake +++ b/cmake/VISPUtils.cmake @@ -1331,11 +1331,24 @@ macro(vp_parse_header4 LIBNAME HDR_PATH DEFINE_NAME OUTPUT_VAR) endif() endmacro() -# read single version info from the pkg file +# Get package version from pkg-config macro(vp_get_version_from_pkg LIBNAME PKG_PATH OUTPUT_VAR) if(EXISTS "${PKG_PATH}/${LIBNAME}.pc") + # Consider the case where pkg-config is not installed file(STRINGS "${PKG_PATH}/${LIBNAME}.pc" line_to_parse REGEX "^Version:[ \t]+[0-9.]*.*$" LIMIT_COUNT 1) string(REGEX REPLACE ".*Version: ([^ ]+).*" "\\1" ${OUTPUT_VAR} "${line_to_parse}" ) + else() + find_package(PkgConfig) + if(PkgConfig_FOUND) + string(TOUPPER ${LIBNAME} LIBNAME_UPPER) + pkg_get_variable(${LIBNAME_UPPER}_PCFILEDIR ${LIBNAME} pcfiledir) + if(EXISTS "${${LIBNAME_UPPER}_PCFILEDIR}/${LIBNAME}.pc") + file(STRINGS "${${LIBNAME_UPPER}_PCFILEDIR}/${LIBNAME}.pc" line_to_parse REGEX "^Version:[ \t]+[0-9.]*.*$" LIMIT_COUNT 1) + string(REGEX REPLACE ".*Version: ([^ ]+).*" "\\1" ${OUTPUT_VAR} "${line_to_parse}" ) + unset(LIBNAME_UPPER) + mark_as_advanced(${LIBNAME_UPPER}_PCFILEDIR) + endif() + endif() endif() endmacro() diff --git a/cmake/templates/visp-config.bat.in b/cmake/templates/visp-config.bat.in old mode 100755 new mode 100644 diff --git a/cmake/templates/vpConfig.h.in b/cmake/templates/vpConfig.h.in index 45d8918a21..7ff33610b8 100644 --- a/cmake/templates/vpConfig.h.in +++ b/cmake/templates/vpConfig.h.in @@ -146,7 +146,7 @@ // Defined if XML2 library available. #cmakedefine VISP_HAVE_XML2 -// Defined if pthread library available. +// Defined if pthread library available (deprecated). #cmakedefine VISP_HAVE_PTHREAD // Defined if std::thread available. 
@@ -565,7 +565,9 @@ #cmakedefine VISP_HAVE_NULLPTR // Emulate nullptr when not available when cxx98 is enabled -#if (!defined(VISP_HAVE_NULLPTR)) && (__cplusplus == 199711L) +// Note that on ubuntu 12.04 __cplusplus is equal to 1 that's why in the next line we consider __cplusplus <= 199711L +// and not __cplusplus == 199711L +#if (!defined(VISP_HAVE_NULLPTR)) && (__cplusplus <= 199711L) #include #endif diff --git a/doc/image/tutorial/misc/img-tutorial-spc-run.jpg b/doc/image/tutorial/misc/img-tutorial-spc-run.jpg new file mode 100644 index 0000000000..ccbcce61ea Binary files /dev/null and b/doc/image/tutorial/misc/img-tutorial-spc-run.jpg differ diff --git a/doc/image/tutorial/started/img-monkey-win.jpg b/doc/image/tutorial/started/img-monkey-win.jpg old mode 100755 new mode 100644 diff --git a/doc/image/tutorial/visual-servo/img-bebop2-coord-system.png b/doc/image/tutorial/visual-servo/img-bebop2-coord-system.png old mode 100755 new mode 100644 diff --git a/doc/mainpage.dox.in b/doc/mainpage.dox.in index 802a620462..4bee5562af 100644 --- a/doc/mainpage.dox.in +++ b/doc/mainpage.dox.in @@ -16,23 +16,29 @@ href="http://www.inria.fr/en/centre/rennes" target="_parent">Inria Rennes.ViSP wiki is https://github.com/lagadic/visp/wiki

-ViSP source code is available on GitHub https://github.com/lagadic/visp
+ViSP source code is available on GitHub
+https://github.com/lagadic/visp

 If you have any problems or find any bugs, please report them in
-the bug tracker. If you may need help, please use the available forums.
+the bug tracker. If you may need help, please use the available
+discussion forum.

It is also possible to contact ViSP main developers using: visp@inria.fr \section arch_sec Software architecture -Since ViSP 3.0.0, we design a new modular software architecture where ViSP capabilities are grouped in several modules (core, io, gui, vision, …). In ViSP 3.1.0 we introduced a new module called imgproc. As a result, the user will find several shared or static libraries, one for each module. The following figure highlights the module dependencies and the third-party libraries that may be used by each module. The central module is the core module. All other modules depend on core. +Since ViSP 3.0.0, we design a new modular software architecture where ViSP capabilities are grouped in several modules +(core, io, gui, vision, …). In ViSP 3.1.0 we introduced a new module called imgproc. As a result, the user will find +several shared or static libraries, one for each module. The following figure highlights the module dependencies and +the third-party libraries that may be used by each module. The central module is the core module. All other modules +depend on core. \image html soft_archi_v6.png "ViSP modules and their optional dependencies" \section hardware Supported Hardware -ViSP is interfaced with various robots, haptic devices, force-torque sensors, laser range finders, depth cameras, RGB cameras, -and motion capture system that you can use for learning visual-servoing or building robotics applications +ViSP is interfaced with various robots, haptic devices, force-torque sensors, laser range finders, depth cameras, +RGB cameras, and motion capture system that you can use for learning visual-servoing or building robotics applications on top of them based on ViSP. See \ref supported-material section. \section software Supported Third-Party Libraries @@ -58,12 +64,14 @@ install \ref supported-third-parties. There is also this page https://visp.inria.fr/software-architecture that gives an overview of the third-parties used by each ViSP module.

-ViSP full installation procedure explaining how to install CMake and third-parties is detailed in the \ref tutorial_install page. \ref tutorial_started and documents in pdf are also available from https://visp.inria.fr/publications.
+ViSP full installation procedure explaining how to install CMake and third-parties is detailed in the
+\ref tutorial_install page. \ref tutorial_started and documents in pdf are also available from
+https://visp.inria.fr/publications.

\section tutorial Tutorials -To learn ViSP, we propose a lot of tutorials that show the basic use of ViSP classes in \ref tutorial_users. +To learn ViSP, we propose a lot of tutorials that show the basic use of ViSP classes in +\ref tutorial_users. @VISP_MAINPAGE_EXTENSION@ @@ -94,7 +102,8 @@ To cite the generic model-bas Year = {2018} } ``` -To cite pose estimation algorithms and hands-on survey illustrated with ViSP examples: +To cite pose estimation algorithms and hands-on survey illustrated with +ViSP examples: ``` @article{Marchand16a, Author = {Marchand, E. and Uchiyama, H. and Spindler, F.}, @@ -110,7 +119,8 @@ To cite pose estimation algorithms \section Examples_sec Using ViSP -

ViSP C++ classes are organized in modules that may help the user during his project implementation.
+ViSP C++ classes are organized in modules that may help the user during his project
+implementation.

From the example page, you will also find examples showing how to use the library to acquire and display an @@ -127,7 +137,9 @@ in different ways. This will motivate us to continue the efforts. - You can submit a bug report using the tracker. - You can submit patches or new functionalities using GitHub Pull Request mechanism. - You can write new tutorials, new documentations or simply improve the existing documentation. -- If you just want to say you've been happy with the library, you can send us a postcard from your place, to the following address: Inria Rennes Bretagne Atlantique, Lagadic team, Campus de Beaulieu, 35042 Rennes Cedex, FRANCE. +- If you just want to say you've been happy with the library, you can send us a postcard from your place, to the + following address: Inria Rennes Bretagne Atlantique, Lagadic team, Campus de Beaulieu, 35042 Rennes Cedex, + FRANCE.

You can also ask for help using GitHub discussions. @@ -271,6 +283,11 @@ in different ways. This will motivate us to continue the efforts. \defgroup group_core_munkres Munkres Assignment Algorithm Munkres Assignment Algorithm. */ +/*! + \ingroup group_core_tools + \defgroup group_core_cpu_features CPU features + CPU features. +*/ /******************************************* * Module io @@ -427,7 +444,8 @@ in different ways. This will motivate us to continue the efforts. - to track an object observed by a single, a stereo camera or more cameras, a RGB-D camera - to consider moving-edges, keypoints or depth as visual features or a combination of these visual features. - The following classes that allow to track an object on a single image continue to be maintained but should no more be used since we provide a more generic class with vpMbGenericTracker: + The following classes that allow to track an object on a single image continue to be maintained but should no more + be used since we provide a more generic class with vpMbGenericTracker: - vpMbEdgeTracker - vpMbKltTracker - vpMbEdgeKltTracker diff --git a/doc/tutorial/bridge/matlab/tutorial-visp-matlab.dox b/doc/tutorial/bridge/matlab/tutorial-visp-matlab.dox index 97fd1f4cf3..533ea4c73d 100644 --- a/doc/tutorial/bridge/matlab/tutorial-visp-matlab.dox +++ b/doc/tutorial/bridge/matlab/tutorial-visp-matlab.dox @@ -5,23 +5,28 @@ \section intro_visp_matlab Introduction -This tutorial shows how to invoke MATLAB functions from ViSP using MATLAB Engine. The MATLAB C/C++ engine library contains routines that allow you to call MATLAB from your own programs, using MATLAB as a computation engine. This can be used to extend ViSP functionality using MATLAB. +This tutorial shows how to invoke MATLAB functions from ViSP using MATLAB Engine. The MATLAB C/C++ engine library +contains routines that allow you to call MATLAB from your own programs, using MATLAB as a computation engine. This can + be used to extend ViSP functionality using MATLAB. -Standalone programs written using MATLAB engine communicates with MATLAB process using pipes on UNIX system and Component Object Model (COM) interface on a Microsoft Windows system. MATLAB provides an API to start and end MATLAB process, send and receive data, and send commands to be processed in MATLAB. +Standalone programs written using MATLAB engine communicates with MATLAB process using pipes on UNIX system and +Component Object Model (COM) interface on a Microsoft Windows system. MATLAB provides an API to start and end MATLAB +process, send and receive data, and send commands to be processed in MATLAB. -Using the MATLAB engine requires an installed version of MATLAB; you cannot run the MATLAB engine on a machine that only has the MATLAB Runtime. Also, path to MATLAB runtime must be set in the PATH environment variable. For a 64bit machine running Windows, the path is `path\to\MATLAB\R20XXy\bin\win64`. +Using the MATLAB engine requires an installed version of MATLAB; you cannot run the MATLAB engine on a machine that +only has the MATLAB Runtime. Also, path to MATLAB runtime must be set in the PATH environment variable. For a 64bit +machine running Windows, the path is `path\to\MATLAB\R20XXy\bin\win64`. -For this tutorial, we create a vpMatrix object containing a 3x3 matrix and pass it to MATLAB sum function to compute a column wise sum of the vpMatrix. +For this tutorial, we create a vpMatrix object containing a 3x3 matrix and pass it to MATLAB sum function to compute a +column wise sum of the vpMatrix. 
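To make that round trip concrete, here is a minimal, untested sketch that is not part of the tutorial sources. It assumes MATLAB's C Engine API (`engine.h`, `engOpen`, `engPutVariable`, `engEvalString`, `engGetVariable`) is available at build time, and it copies the vpMatrix element by element because MATLAB stores arrays in column-major order; the matrix values are arbitrary.

\code
#include <cstdlib>
#include <iostream>

#include <visp3/core/vpMatrix.h>

#include <engine.h> // MATLAB C Engine API

int main()
{
  vpMatrix M(3, 3);
  for (unsigned int i = 0; i < M.getRows(); ++i)
    for (unsigned int j = 0; j < M.getCols(); ++j)
      M[i][j] = 3 * i + j + 1;

  Engine *ep = engOpen(NULL); // start or connect to a MATLAB session
  if (ep == NULL)
    return EXIT_FAILURE;

  // Copy the ViSP matrix (row-major) into a MATLAB array (column-major)
  mxArray *mxM = mxCreateDoubleMatrix(3, 3, mxREAL);
  double *ptr = mxGetPr(mxM);
  for (unsigned int j = 0; j < M.getCols(); ++j)
    for (unsigned int i = 0; i < M.getRows(); ++i)
      *ptr++ = M[i][j];

  engPutVariable(ep, "M", mxM);
  engEvalString(ep, "S = sum(M);"); // column-wise sum computed by MATLAB
  mxArray *mxS = engGetVariable(ep, "S");
  if (mxS != NULL) {
    double *s = mxGetPr(mxS);
    std::cout << "Column-wise sum: " << s[0] << " " << s[1] << " " << s[2] << std::endl;
    mxDestroyArray(mxS);
  }
  mxDestroyArray(mxM);
  engClose(ep);
  return EXIT_SUCCESS;
}
\endcode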
-Note that all the material (source code and image) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/matlab -\endcode +Note that all the material (source code and image) described in this tutorial is part of ViSP source code +(in `tutorial/matlab` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/matlab. \section visp_matlab_cmake CMakeLists.txt file -In order to build a source code that mix ViSP and MATLAB you should first create a `CMakeLists.txt` file that tries to find ViSP and MATLAB. In the following example we consider the case of the tutorial-matlab.cpp source file. +In order to build a source code that mix ViSP and MATLAB you should first create a `CMakeLists.txt` file that tries to +find ViSP and MATLAB. In the following example we consider the case of the tutorial-matlab.cpp source file. \code cmake_minimum_required(VERSION 3.5) diff --git a/doc/tutorial/calibration/tutorial-calibration-extrinsic.dox b/doc/tutorial/calibration/tutorial-calibration-extrinsic.dox index af8ac9b803..d3fcf3585a 100644 --- a/doc/tutorial/calibration/tutorial-calibration-extrinsic.dox +++ b/doc/tutorial/calibration/tutorial-calibration-extrinsic.dox @@ -31,11 +31,7 @@ The calibration process described in this tutorial consists in 3 steps: to estimate the \f$^e{\bf M}_c\f$ transformation. Note that all the material (source code) described in this tutorial is part of ViSP source code -(in `apps/calibration` folder) and could be downloaded using the following command: - -\verbatim -$ svn export https://github.com/lagadic/visp.git/trunk/apps/calibration -\endverbatim +(in `apps/calibration` folder) and could be found in https://github.com/lagadic/visp/tree/master/apps/calibration. \section calib_ext_recommendation Recommendations diff --git a/doc/tutorial/calibration/tutorial-calibration-intrinsic.dox b/doc/tutorial/calibration/tutorial-calibration-intrinsic.dox index 9edf540a8a..a472ec0942 100644 --- a/doc/tutorial/calibration/tutorial-calibration-intrinsic.dox +++ b/doc/tutorial/calibration/tutorial-calibration-intrinsic.dox @@ -3,7 +3,9 @@ \page tutorial-calibration-intrinsic Tutorial: Camera intrinsic calibration \tableofcontents -This tutorial focuses on pinhole camera calibration. The goal of the calibration is here to estimate some camera parameters that allows to make the relation between camera's natural units (pixel positions in the image) and real world units (normalized position in meters in the image plane). +This tutorial focuses on pinhole camera calibration. The goal of the calibration is here to estimate some camera +parameters that allows to make the relation between camera's natural units (pixel positions in the image) and real +world units (normalized position in meters in the image plane). \section calibration_intro Introduction @@ -31,11 +33,15 @@ In ViSP we consider two unit conversions: In this model we consider the parameters \f$(u_0,v_0,p_x,p_y, k_{ud}, k_{du})\f$ where: - \f$(u_0, v_0)\f$ are the coordinates of the principal point in pixel; - \f$(p_x, p_y)\f$ are the ratio between the focal length and the size of a pixel; -- \f$(k_{ud}, k_{du})\f$ are the parameters used to correct the distortion. \f$k_{ud}\f$ is the distortion parameter used to transform the coordinates from \e undistorted to \e distorted images, while \f$k_{du}\f$ is used to transform the coordinates from \e distorted to \e undistorted images. 
+- \f$(k_{ud}, k_{du})\f$ are the parameters used to correct the distortion. \f$k_{ud}\f$ is the distortion parameter +used to transform the coordinates from \e undistorted to \e distorted images, while \f$k_{du}\f$ is used to transform +the coordinates from \e distorted to \e undistorted images. -\note It may be useful to make a tour in \ref tutorial-bridge-opencv that makes in relation the camera model used in ViSP with the one proposed by OpenCV. +\note It may be useful to make a tour in \ref tutorial-bridge-opencv that makes in relation the camera model used in +ViSP with the one proposed by OpenCV. -\note Note also that the container dedicated to camera parameters is implemented in the vpCameraParameters class. It allows to consider two kind of models; with or without distortion. +\note Note also that the container dedicated to camera parameters is implemented in the vpCameraParameters class. +It allows to consider two kind of models; with or without distortion. The calibration process allows to estimate the values of these parameters. @@ -44,9 +50,11 @@ The calibration process allows to estimate the values of these parameters. The following recommendation should be taken into account to obtain good results: - Use a rigid planar calibration board, a printed sheet is not ideal - Adjust the lens to get focused images and adjust the camera parameters to have good illumination -- Avoid perpendicular images (in other terms image plane parallel to the calibration grid) since there is an ambiguity between the camera position and the focal lens +- Avoid perpendicular images (in other terms image plane parallel to the calibration grid) since there is an ambiguity + between the camera position and the focal lens - Take oblique images w.r.t. the calibration pattern -- During data acquisition try to fill the images with corners keeping the whole grid in the image. It is very important to have points close to the edges and the corners of the image in order to get a better estimate of the distortion coefficients +- During data acquisition try to fill the images with corners keeping the whole grid in the image. It is very important + to have points close to the edges and the corners of the image in order to get a better estimate of the distortion coefficients - Acquire between 5 and 15 images. There is no need to acquire a lot of images, but rather good poses How to improve calibration accuracy: @@ -64,9 +72,11 @@ How to improve calibration accuracy: 3. Acquire images of the calibration grid -To calibrate your camera you need to take snapshots of one of these two patterns with your camera. At least 5 good snapshots of the input pattern acquired at different positions are requested for good results. +To calibrate your camera you need to take snapshots of one of these two patterns with your camera. At least 5 good +snapshots of the input pattern acquired at different positions are requested for good results. -To this end see \ref tutorial-grabber and use one of the binaries that could be found in `tutorial/grabber` folder to grab single shot images of the grid. +To this end see \ref tutorial-grabber and use one of the binaries that could be found in `tutorial/grabber` folder +to grab single shot images of the grid. For example, with a webcam connected to a laptop running Linux (Ubuntu, Fedora...) 
use one of the following: \verbatim @@ -118,13 +128,10 @@ In all other cases, try with: \section calibration Calibration \subsection calibration_source_code Source code -All the material (source code and images) described in this tutorial is part of ViSP source code and could be downloaded using the following command: +Note that all the material (source code) described in this tutorial is part of ViSP source code +(in `example/calibration` folder) and could be found in https://github.com/lagadic/visp/tree/master/example/calibration. -\verbatim -$ svn export https://github.com/lagadic/visp.git/trunk/example/calibration -\endverbatim - -The calibration tool is available in \c calibrate_camera.cpp located in \c example/calibration folder. +The calibration tool is available in \c calibrate_camera.cpp located in \c apps/calibration folder. We will not describe in detail the source, but just mention that: - the image processing is performed using OpenCV; diff --git a/doc/tutorial/detection/tutorial-detection-apriltag.dox b/doc/tutorial/detection/tutorial-detection-apriltag.dox index b83418e360..1595c2f81d 100644 --- a/doc/tutorial/detection/tutorial-detection-apriltag.dox +++ b/doc/tutorial/detection/tutorial-detection-apriltag.dox @@ -5,19 +5,22 @@ \section intro_apriltag Introduction -This tutorial shows how to detect one or more AprilTag marker with ViSP. To this end, we provide vpDetectorAprilTag class that is a wrapper over Apriltag 3rd party library. Notice that there is no need to install this 3rd party, since AprilTag source code is embedded in ViSP. +This tutorial shows how to detect one or more AprilTag marker with ViSP. To this end, we provide vpDetectorAprilTag +class that is a wrapper over Apriltag 3rd party +library. Notice that there is no need to install this 3rd party, since AprilTag source code is embedded in ViSP. -The vpDetectorAprilTag class inherits from vpDetectorBase class, a generic class dedicated to detection. For each detected tag, it allows retrieving some characteristics such as the tag id, and in the image, the polygon that contains the tag and corresponds to its 4 corner coordinates, the bounding box and the center of gravity of the tag. +The vpDetectorAprilTag class inherits from vpDetectorBase class, a generic class dedicated to detection. For each +detected tag, it allows retrieving some characteristics such as the tag id, and in the image, the polygon that contains +the tag and corresponds to its 4 corner coordinates, the bounding box and the center of gravity of the tag. -Moreover, vpDetectorAprilTag class allows estimating the 3D pose of the tag. To this end, the camera parameters as well as the size of the tag are required. +Moreover, vpDetectorAprilTag class allows estimating the 3D pose of the tag. To this end, the camera parameters as well +as the size of the tag are required. -In the next sections you will find examples that show how to detect tags in a single image or in images acquired from a camera connected to your computer. +In the next sections you will find examples that show how to detect tags in a single image or in images acquired from a +camera connected to your computer. 
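As a quick preview of the workflow described above, a condensed and untested sketch of single-image detection with pose estimation is given below. It only illustrates the calls discussed in this tutorial (vpDetectorAprilTag::setAprilTagQuadDecimate() and the detect() overload that returns the poses); the camera parameters and tag size are placeholders that must be replaced by calibrated values.

\code
#include <iostream>
#include <vector>

#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/io/vpImageIo.h>

int main()
{
#ifdef VISP_HAVE_APRILTAG
  vpImage<unsigned char> I;
  vpImageIo::read(I, "AprilTag.pgm"); // image provided with the tutorial

  // Placeholder intrinsics (px, py, u0, v0): use your calibrated values instead
  vpCameraParameters cam(615.0, 615.0, I.getWidth() / 2.0, I.getHeight() / 2.0);
  double tag_size = 0.053; // tag side length in meters (placeholder)

  vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11);
  detector.setAprilTagQuadDecimate(1.0); // values > 1 speed up detection at the cost of accuracy

  std::vector<vpHomogeneousMatrix> cMo_vec;
  if (detector.detect(I, tag_size, cam, cMo_vec)) {
    for (size_t i = 0; i < detector.getNbObjects(); ++i) {
      std::cout << "Tag " << detector.getMessage(i) << " pose:\n" << cMo_vec[i] << std::endl;
    }
  }
#endif
  return 0;
}
\endcode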
-Note that all the material (source code and image) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/tag -\endcode +Note that all the material (source code and image) described in this tutorial is part of ViSP source code +(in `tutorial/detection/tag` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/detection/tag. \section apriltag_detection_print Print an AprilTag marker @@ -63,7 +66,7 @@ You will get the following result: \image html img-apriltag-image.png -After a user click in the image, you will get the following image where the frames correspond to the 3D pose of each tag. +After a user click in the image, you will get the following image where the frames correspond to the 3D pose of each tag. \image html img-apriltag-pose.png Now we explain the main lines of the source. @@ -71,7 +74,7 @@ Now we explain the main lines of the source. First we have to include the header corresponding to vpDetectorAprilTag class that allows detecting one or multiple tags. \snippet tutorial-apriltag-detector.cpp Include -Then in the \c main() function before going further we need to check if ViSP was built with AprilTag 3rd party. We also check if ViSP is able to display images using either X11, or the Graphical Device Interface (GDI) under Windows, or OpenCV. +Then in the \c main() function before going further we need to check if ViSP was built with AprilTag 3rd party. We also check if ViSP is able to display images using either X11, or the Graphical Device Interface (GDI) under Windows, or OpenCV. \snippet tutorial-apriltag-detector.cpp Macro defined @@ -82,7 +85,7 @@ After reading the input image \c AprilTag.pgm and the creation of a display devi Then we are applying some settings. There is especially vpDetectorAprilTag::setAprilTagQuadDecimate() function that could be used to decimate the input image in order to speed-up the detection. \snippet tutorial-apriltag-detector.cpp AprilTag detector settings - + We are now ready to detect any 36h11 tags in the image. There is the vpDetectorAprilTag::detect(const vpImage &) function that detects any tags in the image, but since here we want also to estimate the 3D pose of the tags, we call rather vpDetectorAprilTag::detect(const vpImage &, const double, const vpCameraParameters &, std::vector &) that returns the pose of each tag as a vector of vpHomogeneousMatrix in \c cMo_vec variable. \snippet tutorial-apriltag-detector.cpp Detect and compute pose @@ -125,7 +128,7 @@ $ ./tutorial-apriltag-detector-live --tag_family 0 --input 1 \endcode The source code of this example is very similar to the previous one except that here we use camera framegrabber devices (see \ref tutorial-grabber). Two different grabber may be used: -- If ViSP was built with Video For Linux (V4L2) support available for example on Fedora or Ubuntu distribution, VISP_HAVE_V4L2 macro is defined. In that case, images coming from an USB camera are acquired using vpV4l2Grabber class. +- If ViSP was built with Video For Linux (V4L2) support available for example on Fedora or Ubuntu distribution, VISP_HAVE_V4L2 macro is defined. In that case, images coming from an USB camera are acquired using vpV4l2Grabber class. - If ViSP wasn't built with V4L2 support but with OpenCV, we use cv::VideoCapture class to grab the images. 
Notice that when images are acquired with OpenCV there is an additional conversion from cv::Mat to vpImage. \snippet tutorial-apriltag-detector-live.cpp Construct grabber diff --git a/doc/tutorial/detection/tutorial-detection-barcode.dox b/doc/tutorial/detection/tutorial-detection-barcode.dox index b8bf84e8a0..d98f752de9 100644 --- a/doc/tutorial/detection/tutorial-detection-barcode.dox +++ b/doc/tutorial/detection/tutorial-detection-barcode.dox @@ -5,31 +5,39 @@ \section intro_barcode Introduction -This tutorial shows how to detect one or more barcodes with ViSP. To this end we provide two classes that are wrapper over 3rd party libraries: -- vpDetectorQRCode that is a wrappers over zbar library . It allows to detect QR codes. See zbar quick installation guide. -- vpDetectorDataMatrixCode this is a wrapper over dmtx library . It allows to detect Data Matrix codes. See dmtx quick installation guide. +This tutorial shows how to detect one or more barcodes with ViSP. To this end we provide two classes that are wrapper +over 3rd party libraries: +- vpDetectorQRCode that is a wrappers over zbar library . It allows to detect QR codes. +See zbar quick installation guide. +- vpDetectorDataMatrixCode this is a wrapper over dmtx library . It allows to detect Data +Matrix codes. See dmtx quick installation guide. -These classes inherit from vpDetectorBase class, a generic class dedicated to detection. For each detected bar code, it allows to retrieve some characteristics such as the bar code message if it exists, and in the image the polygon that contains the bar code, the bounding box or the center of gravity of the bar code. +These classes inherit from vpDetectorBase class, a generic class dedicated to detection. For each detected bar code, +it allows to retrieve some characteristics such as the bar code message if it exists, and in the image the polygon that +contains the bar code, the bounding box or the center of gravity of the bar code. -In the next sections you will find examples that show how to detect codes in a single image and or in images acquired from a camera connected to your computer. +In the next sections you will find examples that show how to detect codes in a single image and or in images acquired +from a camera connected to your computer. -Note that all the material (source code and image) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/barcode -\endcode +Note that all the material (source code and image) described in this tutorial is part of ViSP source code +(in `tutorial/detection/barcode` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/detection/barcode. \subsection barcode_detection_generator_qrcode Print your QR code -To generate and print your own QR code compatible with vpDetectorQRCode use a QR code generator like: https://www.qr-code-generator.com/. Download the corresponding image and use your favorite tool like Gimp to rescale the image without interpolation if needed. +To generate and print your own QR code compatible with vpDetectorQRCode use a QR code generator like: +https://www.qr-code-generator.com/. Download the corresponding image and use your favorite tool like Gimp to rescale +the image without interpolation if needed. 
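For readers who want the gist before the line-by-line walkthrough below, here is a small, untested sketch of QR-code detection with vpDetectorQRCode. The image name matches the one used by the tutorial; display code and error handling are deliberately left out, and the build is assumed to provide zbar (VISP_HAVE_ZBAR).

\code
#include <iostream>
#include <vector>

#include <visp3/core/vpImage.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/core/vpRect.h>
#include <visp3/detection/vpDetectorQRCode.h>
#include <visp3/io/vpImageIo.h>

int main()
{
#ifdef VISP_HAVE_ZBAR
  vpImage<unsigned char> I;
  vpImageIo::read(I, "bar-code.pgm"); // image provided with the tutorial

  vpDetectorQRCode detector;
  if (detector.detect(I)) {
    for (size_t i = 0; i < detector.getNbObjects(); ++i) {
      std::cout << "QR code " << i << ": " << detector.getMessage(i) << std::endl;
      std::vector<vpImagePoint> polygon = detector.getPolygon(i); // the 4 corners of the code
      vpRect bbox = detector.getBBox(i);                          // its bounding box
      std::cout << "  " << polygon.size() << " corners, bbox "
                << bbox.getWidth() << "x" << bbox.getHeight() << std::endl;
    }
  }
#endif
  return 0;
}
\endcode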
\subsection barcode_detection_generator_datamatrix Print your DataMatrix code -To generate and print your own DataMatrix code compatible with vpDetectorDataMatrixCode use a DataMatrix code generator like: http://datamatrix.kaywa.com/. Save the corresponding image and use your favorite tool like Gimp to rescale the image without interpolation if needed. +To generate and print your own DataMatrix code compatible with vpDetectorDataMatrixCode use a DataMatrix code generator +like: http://datamatrix.kaywa.com/. Save the corresponding image and use your favorite tool like Gimp to rescale the +image without interpolation if needed. \section barcode_detection_basic Bar code detection (single image) -The following example also available in tutorial-barcode-detector.cpp allows to detect either a QR code or Data Matrix on a single image. The user can select with `--code-type` option which code to detect. +The following example also available in tutorial-barcode-detector.cpp allows to detect either a QR code or Data Matrix +on a single image. The user can select with `--code-type` option which code to detect. \subsection barcode_detection_basic_src Source code explained @@ -42,18 +50,22 @@ Now we explain the main lines of the source. First we have to include the header of the two classes that allow to detect either the QR code or the Data Matrix code. \snippet tutorial-barcode-detector.cpp Include -Then in the main() function before going further we need to check if at least one of the third party (zbar or dmtx libraries) were used to build ViSP. We also check if ViSP is able to display images using either X11, or the Graphical Device Interface (GDI) under Windows, or OpenCV. +Then in the main() function before going further we need to check if at least one of the third party (zbar or dmtx +libraries) were used to build ViSP. We also check if ViSP is able to display images using either X11, or the Graphical +Device Interface (GDI) under Windows, or OpenCV. \snippet tutorial-barcode-detector.cpp Macro defined -After reading the input image \e bar-code.pgm and creation of a display device in order to visualize the image, we create a NULL pointer to a detector base class. +After reading the input image \e bar-code.pgm and creation of a display device in order to visualize the image, we +create a NULL pointer to a detector base class. -\snippet tutorial-barcode-detector.cpp Create base detector +\snippet tutorial-barcode-detector.cpp Create base detector -Since the classes that allow to detect QR codes and Data Matrix codes inherit from vpDetectorBase, using the variable \e opt_barcode we are able to construct the requested detector. +Since the classes that allow to detect QR codes and Data Matrix codes inherit from vpDetectorBase, using the variable +\e opt_barcode we are able to construct the requested detector. \snippet tutorial-barcode-detector.cpp Create detector - + We are now ready to detect the bar code in the image. \snippet tutorial-barcode-detector.cpp Detection @@ -63,7 +75,8 @@ In that case, we can retrieve the number of detected bar codes in order to creat \snippet tutorial-barcode-detector.cpp Parse detected codes -For each code, we can then get the location of the 4 points that define the polygon that contains the code, but also the bounding box. +For each code, we can then get the location of the 4 points that define the polygon that contains the code, but also +the bounding box. 
\snippet tutorial-barcode-detector.cpp Get location @@ -91,7 +104,8 @@ You will get the following result: \section barcode_detection_live Bar code detection (live camera) -This other example also available in tutorial-barcode-detector-live.cpp shows how to couple the bar code detector to an image grabber in order to detect bar codes on each new image acquired by a camera connected to your computer. +This other example also available in tutorial-barcode-detector-live.cpp shows how to couple the bar code detector to +an image grabber in order to detect bar codes on each new image acquired by a camera connected to your computer. \subsection barcode_detection_live_src Source code explained @@ -99,9 +113,12 @@ Below you will find tutorial-barcode-detector-live.cpp source code. \include tutorial-barcode-detector-live.cpp -The source code of this example is very similar to the previous one except that here we use camera framegrabber devices (see \ref tutorial-grabber). Two different grabber may be used: -- If ViSP was build with Video For Linux (V4L2) support available for example on Fedora or Ubuntu distribution, VISP_HAVE_V4L2 macro is defined. In that case, images coming from an USB camera are acquired using vpV4l2Grabber class. -- If ViSP wasn't build with V4L2 support, but with OpenCV we use cv::VideoCapture class to grab the images. Notice that when images are acquired with OpenCV there is an additional conversion from cv::Mat to vpImage. +The source code of this example is very similar to the previous one except that here we use camera framegrabber devices +(see \ref tutorial-grabber). Two different grabber may be used: +- If ViSP was build with Video For Linux (V4L2) support available for example on Fedora or Ubuntu distribution, + VISP_HAVE_V4L2 macro is defined. In that case, images coming from an USB camera are acquired using vpV4l2Grabber class. +- If ViSP wasn't build with V4L2 support, but with OpenCV we use cv::VideoCapture class to grab the images. Notice that + when images are acquired with OpenCV there is an additional conversion from cv::Mat to vpImage. \snippet tutorial-barcode-detector-live.cpp Construct grabber @@ -113,8 +130,10 @@ This new image is then given as input to the bar code detector. \subsection barcode_detection_live_run Run binary The usage of this example is similar to the previous one: -- with option `--code-type` you select if you want to detect a QR code (use `--code-type 0`) or a Data Matrix (use `--code-type 1`). -- if more than one camera is connected to you computer, with option `--device` you can select which camera to use. The first camera that is found has number 0. +- with option `--code-type` you select if you want to detect a QR code (use `--code-type 0`) or a Data Matrix + (use `--code-type 1`). +- if more than one camera is connected to you computer, with option `--device` you can select which camera to use. + The first camera that is found has number 0. To detect QR codes on images acquired by a second camera connected to your computer use: \code @@ -123,9 +142,11 @@ $ ./tutorial-barcode-detector-live --code-type 0 --device 1 \section barcode_detection_next Next tutorial -Now you can follow \ref tutorial-pose-estimation that explains how to compute a 3D pose from points. This tutorial could be useful if you want to compute the pose of a QR code from it’s 4 corner coordinates in the image that could be retrieved using getPolygon(), it’s 3D size and camera parameters. 
- -You are now also ready to see \ref tutorial-detection-apriltag, that illustrates how to detect AprilTag patterns in an image and compute the 3D pose of each pattern. +Now you can follow \ref tutorial-pose-estimation that explains how to compute a 3D pose from points. This tutorial +could be useful if you want to compute the pose of a QR code from it’s 4 corner coordinates in the image that could be +retrieved using getPolygon(), it’s 3D size and camera parameters. +You are now also ready to see \ref tutorial-detection-apriltag, that illustrates how to detect AprilTag patterns in an +image and compute the 3D pose of each pattern. */ diff --git a/doc/tutorial/detection/tutorial-detection-face.dox b/doc/tutorial/detection/tutorial-detection-face.dox index 53672be077..8d65e181e9 100644 --- a/doc/tutorial/detection/tutorial-detection-face.dox +++ b/doc/tutorial/detection/tutorial-detection-face.dox @@ -5,20 +5,20 @@ \section intro_face Introduction -This tutorial shows how to detect one or more faces with ViSP. Face detection is performed using OpenCV Haar cascade capabilities that are used in vpDetectorFace class. At least OpenCV 2.2.0 or a more recent version is requested. +This tutorial shows how to detect one or more faces with ViSP. Face detection is performed using OpenCV Haar cascade +capabilities that are used in vpDetectorFace class. At least OpenCV 2.2.0 or a more recent version is requested. -In the next sections you will find examples that show how to detect faces in a video, or in images acquired by a camera connected to your computer. - -Note that all the material (source code and video) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/face -\endcode +In the next sections you will find examples that show how to detect faces in a video, or in images acquired by a camera +connected to your computer. +Note that all the material (source code and image) described in this tutorial is part of ViSP source code +(in `tutorial/detection/face` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/detection/face. \section face_detection_video Face detection in a video -The following example also available in tutorial-face-detector.cpp allows to detect faces in an mpeg video located near the source code. The Haar cascade classifier file requested by OpenCV is also provided in the same folder as the source code. +The following example also available in tutorial-face-detector.cpp allows to detect faces in an mpeg video located near +the source code. The Haar cascade classifier file requested by OpenCV is also provided in the same folder as the source +code. \include tutorial-face-detector.cpp @@ -37,7 +37,7 @@ Now we explain the main lines of the source. First we have to include the header of the class that allows to detect a face. \snippet tutorial-face-detector.cpp Include -Then in the main() function before going further we need to check if OpenCV 2.2.0 is available. +Then in the main() function before going further we need to check if OpenCV 2.2.0 is available. \snippet tutorial-face-detector.cpp Macro defined @@ -54,13 +54,15 @@ $ ./tutorial-face-detector --help Usage: ./tutorial-face-detector [--haar ] [--video ] [--help] \endcode -Then we open the video stream, create a windows named "ViSP viewer" where images and the resulting face detection will be displayed. 
- +Then we open the video stream, create a windows named "ViSP viewer" where images and the resulting face detection will +be displayed. + The creation of the face detector is performed using \snippet tutorial-face-detector.cpp Face detector construction - -We need also to set the location and name of the xml file that contains the Haar cascade classifier data used to recognized a face. + +We need also to set the location and name of the xml file that contains the Haar cascade classifier data used to +recognized a face. \snippet tutorial-face-detector.cpp Face detector setting @@ -68,37 +70,46 @@ Then we enter in the while loop where for each new image, the try to detect one \snippet tutorial-face-detector.cpp Face detection -If a face is detected, vpDetectorFace::detect() returns true. It is then possible to retrieve the number of faces that are detected: +If a face is detected, vpDetectorFace::detect() returns true. It is then possible to retrieve the number of faces that +are detected: \snippet tutorial-face-detector.cpp Get number faces -For each face, we have access to its location using vpDetectorFace::getPolygon(), its bounding box using vpDetectorFace::getBBox() and its identifier message using vpDetectorFace::getMessage(). +For each face, we have access to its location using vpDetectorFace::getPolygon(), its bounding box using +vpDetectorFace::getBBox() and its identifier message using vpDetectorFace::getMessage(). \snippet tutorial-face-detector.cpp Get face characteristics -\note When more than one face is detected, faces are ordered from the largest to the smallest. That means that vpDetectorFace::getPolygon(0), vpDetectorFace::getBBox(0) and vpDetectorFace::getMessage(0) return always the characteristics of the largest face. +\note When more than one face is detected, faces are ordered from the largest to the smallest. That means that +vpDetectorFace::getPolygon(0), vpDetectorFace::getBBox(0) and vpDetectorFace::getMessage(0) return always the +characteristics of the largest face. \section face_detection_live Face detection from a camera -This other example also available in tutorial-face-detector-live.cpp shows how to detect one or more faces in images acquired by a camera connected to your computer. +This other example also available in tutorial-face-detector-live.cpp shows how to detect one or more faces in +images acquired by a camera connected to your computer. \include tutorial-face-detector-live.cpp -The usage of this example is similar to the previous one. Just run +The usage of this example is similar to the previous one. Just run \code $ ./tutorial-face-detector-live \endcode -Additional command line options are available to specify the location of the Haar cascade file and also the camera identifier if more than one camera is connected to your computer: +Additional command line options are available to specify the location of the Haar cascade file and also the camera +identifier if more than one camera is connected to your computer: \code $ ./tutorial-face-detector-live --help Usage: ./tutorial-face-detector-live [--device ] [--haar ] [--help] \endcode -The source code of this example is very similar to the previous one except that here we use camera framegrabber devices (see \ref tutorial-grabber). Two different grabber may be used: -- If ViSP was build with Video For Linux (V4L2) support available for example on Fedora or Ubuntu distribution, VISP_HAVE_V4L2 macro is defined. In that case, images coming from an USB camera are acquired using vpV4l2Grabber class. 
-- If ViSP wasn't build with V4L2 support, but with OpenCV we use cv::VideoCapture class to grab the images. Notice that when images are acquired with OpenCV there is an additional conversion from cv::Mat to vpImage. +The source code of this example is very similar to the previous one except that here we use camera framegrabber devices +(see \ref tutorial-grabber). Two different grabber may be used: +- If ViSP was build with Video For Linux (V4L2) support available for example on Fedora or Ubuntu distribution, + VISP_HAVE_V4L2 macro is defined. In that case, images coming from an USB camera are acquired using vpV4l2Grabber class. +- If ViSP wasn't build with V4L2 support, but with OpenCV we use cv::VideoCapture class to grab the images. Notice that + when images are acquired with OpenCV there is an additional conversion from cv::Mat to vpImage. \snippet tutorial-face-detector-live.cpp Construct grabber @@ -109,6 +120,6 @@ This new image is then given as input to the face detector. \section face_detection_next Next tutorial -You are now ready to see the \ref tutorial-multi-threading, that illustrates the case of face detection achieved in a separate thread. +You are now ready to see the \ref tutorial-detection-object, that illustrates the case of object detection. */ diff --git a/doc/tutorial/detection/tutorial-detection-object-deprecated.dox b/doc/tutorial/detection/tutorial-detection-object-deprecated.dox index 4611967283..f3c2214c8d 100644 --- a/doc/tutorial/detection/tutorial-detection-object-deprecated.dox +++ b/doc/tutorial/detection/tutorial-detection-object-deprecated.dox @@ -7,21 +7,23 @@ We recommend rather to follow \ref tutorial-detection-object that is based on vpMbGenericTracker. -This tutorial will show you how to use keypoints to detect and estimate the pose of a known object using his cad model. The first step consists in detecting and learning keypoints located on the faces of an object, while the second step makes the matching between the detected keypoints in the query image with those previously learned. The pair of matches are then used to estimate the pose of the object with the knowledge of the correspondences between the 2D and 3D coordinates. +This tutorial will show you how to use keypoints to detect and estimate the pose of a known object using his cad model. +The first step consists in detecting and learning keypoints located on the faces of an object, while the second step +makes the matching between the detected keypoints in the query image with those previously learned. The pair of matches +are then used to estimate the pose of the object with the knowledge of the correspondences between the 2D and 3D +coordinates. The next section presents a basic example of the detection of a teabox with a detailed description of the different steps. -Note that all the material (source code and video) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/object -\endcode +Note that all the material (source code and image) described in this tutorial is part of ViSP source code +(in `tutorial/detection/object` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/detection/object. 
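Before detailing the learning and detection steps, here is a heavily condensed, untested sketch of the detection side only. It assumes that learning data and an XML configuration file were produced beforehand as explained below; the query image and learning-data file names are placeholders, and the configuration file is the `detection-config-SIFT.xml` shipped with this tutorial.

\code
#include <iostream>

#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>

int main()
{
  vpImage<unsigned char> I;
  vpImageIo::read(I, "query.pgm"); // placeholder query image

  vpCameraParameters cam; // fill with your calibrated parameters

  vpKeyPoint keypoint_detection;
  // Requires ViSP built with an XML parser 3rd party
  keypoint_detection.loadConfigFile("detection-config-SIFT.xml");
  // Binary learning data produced during the learning step (placeholder name)
  keypoint_detection.loadLearningData("teabox_learning_data.bin", true);

  vpHomogeneousMatrix cMo;
  double error = 0., elapsed = 0.;
  if (keypoint_detection.matchPoint(I, cam, cMo, error, elapsed)) {
    std::cout << "Object detected, pose:\n" << cMo
              << "\nreprojection error: " << error << std::endl;
  }
  return 0;
}
\endcode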
\section dep_object_detection Object detection using keypoints \subsection dep_detection_object_preamble Preamble -You are advised to read the following tutorials \ref tutorial-tracking-mb-deprecated and \ref tutorial-matching if you are not aware of these concepts. +You are advised to read the following tutorials \ref tutorial-tracking-mb-deprecated and \ref tutorial-matching if you +are not aware of these concepts. \subsection dep_detection_object_principle Principle of object detection using keypoints @@ -29,38 +31,52 @@ A quick overview of the principle is summed-up in the following diagrams. \image html img-learning-step.jpeg Learning step. -The first part of the process consists in learning the characteristics of the considered object by extracting the keypoints detected on the different faces. -We use here the model-based tracker initialized given a known initial pose to have access to the cad model of the object. The cad model is then used to select only keypoints on faces that are visible and to calculate the 3D coordinates of keypoints. +The first part of the process consists in learning the characteristics of the considered object by extracting the +keypoints detected on the different faces. +We use here the model-based tracker initialized given a known initial pose to have access to the cad model of the +object. The cad model is then used to select only keypoints on faces that are visible and to calculate the 3D +coordinates of keypoints. -\note The calculation of the 3D coordinates of a keypoint is based on a planar location hypothesis. We assume that the keypoint is located on a planar face and the Z-coordinate is retrieved according to the proportional relation between the plane equation expressed in the normalized camera frame (derived from the image coordinate) and the same plane equation expressed in the camera frame, thanks to the known pose of the object. +\note The calculation of the 3D coordinates of a keypoint is based on a planar location hypothesis. We assume that +the keypoint is located on a planar face and the Z-coordinate is retrieved according to the proportional relation +between the plane equation expressed in the normalized camera frame (derived from the image coordinate) and the same +plane equation expressed in the camera frame, thanks to the known pose of the object. -In this example the learned data (the list of 3D coordinates and the corresponding descriptors) are saved in a file and will be used later in the detection part. +In this example the learned data (the list of 3D coordinates and the corresponding descriptors) are saved in a file +and will be used later in the detection part. \image html img-detection-step.jpeg Detection step. -In a query image where we want to detect the object, we find the matches between the keypoints detected in the current image with those previously learned. +In a query image where we want to detect the object, we find the matches between the keypoints detected in the current +image with those previously learned. The estimation of the pose of the object can then be computed with the 3D/2D information. The next section presents an example of the detection and the pose estimation of a teabox. \subsection dep_detection_object_teabox_example Teabox detection and pose estimation -The following video shows the resulting detection and localization of a teabox that is learned on the first image of the video. 
+The following video shows the resulting detection and localization of a teabox that is learned on the first image of +the video. \htmlonly \endhtmlonly -The corresponding code is available in tutorial-detection-object-mbt-deprecated.cpp. It contains the different steps to learn the teabox object on one image (the first image of the video) and then detect and get the pose of the teabox in the rest of the video. +The corresponding code is available in tutorial-detection-object-mbt-deprecated.cpp. It contains the different steps to +learn the teabox object on one image (the first image of the video) and then detect and get the pose of the teabox in +the rest of the video. \include tutorial-detection-object-mbt-deprecated.cpp -You may recognize with the following lines the code used in \ref tutorial-mb-edge-tracker.cpp to initialize the model-based tracker at a given pose and with the appropriate configuration. +You may recognize with the following lines the code used in \ref tutorial-mb-edge-tracker.cpp to initialize the +model-based tracker at a given pose and with the appropriate configuration. \snippet tutorial-detection-object-mbt-deprecated.cpp MBT code The modifications made to the code start from now. -First, we have to choose about which type of keypoints will be used. SIFT keypoints are a widely type of keypoints used in computer vision, but depending of your version of OpenCV and due to some patents, certain types of keypoints will not be available. +First, we have to choose about which type of keypoints will be used. SIFT keypoints are a widely type of keypoints +used in computer vision, but depending of your version of OpenCV and due to some patents, certain types of keypoints +will not be available. Here, we will use SIFT if available, otherwise a combination of FAST keypoint detector and ORB descriptor extractor. \snippet tutorial-detection-object-mbt-deprecated.cpp Keypoint selection @@ -69,7 +85,8 @@ The following line declares an instance of the vpKeyPoint class : \snippet tutorial-detection-object-mbt-deprecated.cpp Keypoint declaration -You can load the configuration (type of detector, extractor, matcher, ransac pose estimation parameters) directly with an xml configuration file : +You can load the configuration (type of detector, extractor, matcher, ransac pose estimation parameters) directly with +an xml configuration file : \snippet tutorial-detection-object-mbt-deprecated.cpp Keypoint xml config @@ -81,15 +98,19 @@ We then detect keypoints in the reference image with the object we want to learn \snippet tutorial-detection-object-mbt-deprecated.cpp Keypoints reference detection -But we need to keep keypoints only on faces of the teabox. This is done by using the model-based tracker to first eliminate keypoints which do not belong to the teabox and secondly to have the plane equation for each faces (and so to be able to compute the 3D coordinate from the 2D information). +But we need to keep keypoints only on faces of the teabox. This is done by using the model-based tracker to first +eliminate keypoints which do not belong to the teabox and secondly to have the plane equation for each faces (and so +to be able to compute the 3D coordinate from the 2D information). \snippet tutorial-detection-object-mbt-deprecated.cpp Keypoints selection on faces -The next step is the building of the reference keypoints. The descriptors for each keypoints are also extracted and the reference data consist of the lists of keypoints / descriptors and the list of 3D points. 
+The next step is the building of the reference keypoints. The descriptors for each keypoints are also extracted and +the reference data consist of the lists of keypoints / descriptors and the list of 3D points. \snippet tutorial-detection-object-mbt-deprecated.cpp Keypoints build reference -We save the learning data in a binary format (the other possibilitie is to save in an xml format but which takes more space) to be able to use it later. +We save the learning data in a binary format (the other possibilitie is to save in an xml format but which takes more +space) to be able to use it later. \snippet tutorial-detection-object-mbt-deprecated.cpp Save learning data @@ -97,7 +118,8 @@ We then visualize the result of the learning process by displaying with a cross \snippet tutorial-detection-object-mbt-deprecated.cpp Display reference keypoints -We declare now another instance of the vpKeyPoint class dedicated this time to the detection of the teabox. The configuration is directly loaded from an xml file, otherwise this is done directly in the code. +We declare now another instance of the vpKeyPoint class dedicated this time to the detection of the teabox. The +configuration is directly loaded from an xml file, otherwise this is done directly in the code. \snippet tutorial-detection-object-mbt-deprecated.cpp Init keypoint detection @@ -105,35 +127,50 @@ The previously saved binary file corresponding to the teabox learning data is lo \snippet tutorial-detection-object-mbt-deprecated.cpp Load teabox learning data -We are now ready to detect the teabox in a query image. The call to the function vpKeyPoint::matchPoint() returns true if the matching was successful and permits to get the estimated homogeneous matrix corresponding to the pose of the object. The reprojection error is also computed. +We are now ready to detect the teabox in a query image. The call to the function vpKeyPoint::matchPoint() returns true +if the matching was successful and permits to get the estimated homogeneous matrix corresponding to the pose of the +object. The reprojection error is also computed. \snippet tutorial-detection-object-mbt-deprecated.cpp Matching and pose estimation -In order to display the result, we use the tracker initialized at the estimated pose and we display also the location of the world frame: +In order to display the result, we use the tracker initialized at the estimated pose and we display also the location +of the world frame: \snippet tutorial-detection-object-mbt-deprecated.cpp Tracker set pose \snippet tutorial-detection-object-mbt-deprecated.cpp Display -The pose of the detected object can then be used to initialize a tracker automatically rather then using a human initialization; see \ref tutorial-tracking-mb-deprecated and \ref tutorial-tracking-tt. +The pose of the detected object can then be used to initialize a tracker automatically rather then using a human +initialization; see \ref tutorial-tracking-mb-deprecated and \ref tutorial-tracking-tt. \subsection dep_detection_object_quick_explanation Quick explanation about some parameters used in the example -The content of the configuration file named detection-config-SIFT.xml and provided with this example is described in the following lines : +The content of the configuration file named detection-config-SIFT.xml and provided with this example is described in +the following lines : \include detection-config-SIFT.xml In this configuration file, SIFT keypoints are used. 
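If ViSP is not built with xml support, an equivalent configuration can be written directly in the code. The following sketch is indicative only: the setter names are those of the vpKeyPoint class also used by the tutorial sources, and the values simply mirror the ones discussed in this section.

\code
#include <visp3/vision/vpKeyPoint.h>

vpKeyPoint keypoint_detection;
keypoint_detection.setDetector("SIFT");
keypoint_detection.setExtractor("SIFT");
keypoint_detection.setMatcher("FlannBased");
// Keep only matches that are sufficiently discriminated (ratio test)
keypoint_detection.setFilterMatchingType(vpKeyPoint::ratioDistanceThreshold);
keypoint_detection.setMatchingRatioThreshold(0.8);
// Ransac pose estimation settings (illustrative values)
keypoint_detection.setUseRansacVVS(true);                 // virtual visual servoing refinement
keypoint_detection.setUseRansacConsensusPercentage(true);
keypoint_detection.setRansacConsensusPercentage(20.0);
keypoint_detection.setRansacIteration(200);
keypoint_detection.setRansacThreshold(0.005);
\endcode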
Let us explain now the configuration of the matcher: -- a brute force matching will explore all the possible solutions to match a considered keypoints detected in the current image to the closest (in descriptor distance term) one in the reference set, contrary to the other type of matching using the library FLANN (Fast Library for Approximate Nearest Neighbors) which contains some optimizations to reduce the complexity of the solution set, -- to eliminate some possible false matching, one technique consists of keeping only the keypoints whose are sufficienly discriminated using a ratio test. +- a brute force matching will explore all the possible solutions to match a considered keypoint detected in the + current image to the closest (in descriptor distance term) one in the reference set, contrary to the other type of + matching using the library FLANN (Fast Library for Approximate Nearest Neighbors) which contains some optimizations to + reduce the complexity of the solution set, +- to eliminate some possible false matches, one technique consists of keeping only the keypoints that are sufficiently + discriminated using a ratio test. Now, for the Ransac pose estimation part : -- two methods are provided to estimate the pose in a robust way: one using OpenCV, the other method uses a virtual visual servoing approach using ViSP, -- basically, a Ransac method is composed of two steps repeated a certain number of iterations: first we pick randomly 4 points and estimate the pose, the second step is to keep all points which sufficienly "agree" (the reprojection error is below a threshold) with the pose determinated in the first step. These points are inliers and form the consensus set, the other are outliers. -If enough points are in the consensus set (here 20 % of all the points), the pose is refined and returned, otherwise another iteration is made (here 200 iterations maximum). - -Below you will also find the content of detection-lconfig.xml configuration file, also provided in this example. It allows to use FAST detector and ORB extractor. +- two methods are provided to estimate the pose in a robust way: one using OpenCV, the other using a virtual + visual servoing approach provided by ViSP, +- basically, a Ransac method is composed of two steps repeated a certain number of iterations: first we pick randomly + 4 points and estimate the pose, the second step is to keep all points which sufficiently "agree" (the reprojection error + is below a threshold) with the pose determined in the first step. These points are inliers and form the consensus + set, the others are outliers. +If enough points are in the consensus set (here 20 % of all the points), the pose is refined and returned, otherwise +another iteration is made (here 200 iterations maximum). + +Below you will also find the content of the detection-lconfig.xml configuration file, also provided in this example. +It allows the use of the FAST detector and ORB extractor. \include detection-config.xml @@ -141,15 +178,18 @@ Below you will also find the content of detection-lconfig.xml configuration file \subsection dep_detection_object_multiple_learning How to learn keypoints from multiple images -The following video shows an extension of the previous example where here we learn a cube from 3 images and then detect an localize the cube in all the images of the video. +The following video shows an extension of the previous example where we learn a cube from 3 images and then detect +and localize the cube in all the images of the video.
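Before moving on, here is a quick numeric illustration of the consensus criterion described above; the number of matches is of course an assumption:

\code
unsigned int nbMatches = 500;        // assumed number of matched keypoints
double consensusPercentage = 20.0;   // percentage required by the configuration above
unsigned int minInliers = static_cast<unsigned int>(nbMatches * consensusPercentage / 100.0); // = 100
// If fewer than 100 matches "agree" with the pose computed from the 4 random points,
// another Ransac iteration is run, up to the 200 iterations maximum.
\endcode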
\htmlonly \endhtmlonly -The corresponding source code is given in tutorial-detection-object-mbt2-deprecated.cpp. If you have a look on this file you will find the following. +The corresponding source code is given in tutorial-detection-object-mbt2-deprecated.cpp. If you have a look on this +file you will find the following. -Before starting with the keypoints detection and learning part, we have to set the correct pose for the tracker using a predefined pose: +Before starting with the keypoints detection and learning part, we have to set the correct pose for the tracker using +a predefined pose: \snippet tutorial-detection-object-mbt2-deprecated.cpp Set tracker pose @@ -157,9 +197,11 @@ One good thing to do is to refine the pose by running one iteration of the model \snippet tutorial-detection-object-mbt2-deprecated.cpp Refine pose -The vpKeyPoint::buildReference() allows to append the current detected keypoints with those already present by setting the function parameter append to true. +The vpKeyPoint::buildReference() allows to append the current detected keypoints with those already present by setting +the function parameter append to true. -But before that, the same learning procedure must be done in order to train on multiple images. We detect keypoints on the desired image: +But before that, the same learning procedure must be done in order to train on multiple images. We detect keypoints on +the desired image: \snippet tutorial-detection-object-mbt2-deprecated.cpp Keypoints reference detection @@ -167,21 +209,27 @@ Then, we keep only keypoints that are located on the object faces: \snippet tutorial-detection-object-mbt2-deprecated.cpp Keypoints selection on faces -And finally, we build the reference keypoints and we set the flag append to true to say that we want to keep the previously learned keypoints: +And finally, we build the reference keypoints and we set the flag append to true to say that we want to keep the +previously learned keypoints: \snippet tutorial-detection-object-mbt2-deprecated.cpp Keypoints build reference -\subsection dep_detection_object_display_multiple_images How to display the matching when the learning is done on multiple images +\subsection dep_detection_object_display_multiple_images How to display the matching when the learning is done on +multiple images -In this section we will explain how to display the matching between keypoints detected in the current image and their correspondences in the reference images that are used during the learning stage, as given in the next video: +In this section we will explain how to display the matching between keypoints detected in the current image and their +correspondences in the reference images that are used during the learning stage, as given in the next video: \htmlonly \endhtmlonly -\warning If you want to load the learning data from a file, you have to use a learning file that contains training images (with the parameter saveTrainingImages vpKeyPoint::saveLearningData() set to true when saving the file, by default it is). +\warning If you want to load the learning data from a file, you have to use a learning file that contains training +images (with the parameter saveTrainingImages vpKeyPoint::saveLearningData() set to true when saving the file, by +default it is). 
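In connection with the warning above, saving the learning data so that the training images are embedded in the file could look like the following sketch. The file name and the keypoint variable are illustrative; the argument meanings follow vpKeyPoint::saveLearningData(), whose last parameter already defaults to true.

\code
// Binary mode (second argument) keeps the file small; the third argument asks to embed
// the training images so that the image matching can be displayed later on.
keypoint_learning.saveLearningData("cube_learning_data.bin", true, true);
\endcode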
-Before showing how to display the matching for all the training images, we have to attribute an unique identifier (a positive integer) for the set of keypoints learned for a particular image during the training process: +Before showing how to display the matching for all the training images, we have to attribute an unique identifier +(a positive integer) for the set of keypoints learned for a particular image during the training process: \snippet tutorial-detection-object-mbt2-deprecated.cpp Keypoints build reference @@ -213,11 +261,13 @@ The following code shows how to retrieve the RANSAC inliers and outliers: \snippet tutorial-detection-object-mbt2-deprecated.cpp Get RANSAC inliers outliers -Finally, we can also display the model in the matching image. For that, we have to modify the principal point offset of the intrinsic parameter. +Finally, we can also display the model in the matching image. For that, we have to modify the principal point offset +of the intrinsic parameter. This is more or less an hack as you have to manually change the principal point coordinate to make it works. \snippet tutorial-detection-object-mbt2-deprecated.cpp Display model image matching -\note You can refer to the full code in the section \ref detection_object_multiple_learning to have an example of how to learn from multiple images and how to display all the matching. +\note You can refer to the full code in the section \ref detection_object_multiple_learning to have an example of how +to learn from multiple images and how to display all the matching. */ diff --git a/doc/tutorial/detection/tutorial-detection-object.dox b/doc/tutorial/detection/tutorial-detection-object.dox index 74bd74e6fd..c905ccc6ca 100644 --- a/doc/tutorial/detection/tutorial-detection-object.dox +++ b/doc/tutorial/detection/tutorial-detection-object.dox @@ -5,21 +5,24 @@ \section object_detection_intro Introduction -This tutorial will show you how to use keypoints to detect and estimate the pose of a known object using his cad model. The first step consists in detecting and learning keypoints located on the faces of an object, while the second step makes the matching between the detected keypoints in the query image with those previously learned. The pair of matches are then used to estimate the pose of the object with the knowledge of the correspondences between the 2D and 3D coordinates. +This tutorial will show you how to use keypoints to detect and estimate the pose of a known object using his cad model. +The first step consists in detecting and learning keypoints located on the faces of an object, while the second step +makes the matching between the detected keypoints in the query image with those previously learned. The pair of matches +are then used to estimate the pose of the object with the knowledge of the correspondences between the 2D and 3D +coordinates. The next section presents a basic example of the detection of a teabox with a detailed description of the different steps. -Note that all the material (source code and video) described in this tutorial is part of ViSP source code and could be downloaded using the following command: +Note that all the material (source code and image) described in this tutorial is part of ViSP source code +(in `tutorial/detection/object` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/detection/object. 
-\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/object -\endcode \section object_detection Object detection using keypoints \subsection detection_object_preamble Preamble -You are advised to read the following tutorials \ref tutorial-tracking-mb-generic and \ref tutorial-matching if you are not aware of these concepts. +You are advised to read the following tutorials \ref tutorial-tracking-mb-generic and \ref tutorial-matching if you are +not aware of these concepts. \subsection detection_object_principle Principle of object detection using keypoints @@ -27,38 +30,52 @@ A quick overview of the principle is summed-up in the following diagrams. \image html img-learning-step.jpeg Learning step. -The first part of the process consists in learning the characteristics of the considered object by extracting the keypoints detected on the different faces. -We use here the model-based tracker initialized given a known initial pose to have access to the cad model of the object. The cad model is then used to select only keypoints on faces that are visible and to calculate the 3D coordinates of keypoints. +The first part of the process consists in learning the characteristics of the considered object by extracting the +keypoints detected on the different faces. +We use here the model-based tracker initialized given a known initial pose to have access to the cad model of the +object. The cad model is then used to select only keypoints on faces that are visible and to calculate the 3D +coordinates of keypoints. -\note The calculation of the 3D coordinates of a keypoint is based on a planar location hypothesis. We assume that the keypoint is located on a planar face and the Z-coordinate is retrieved according to the proportional relation between the plane equation expressed in the normalized camera frame (derived from the image coordinate) and the same plane equation expressed in the camera frame, thanks to the known pose of the object. +\note The calculation of the 3D coordinates of a keypoint is based on a planar location hypothesis. We assume that the +keypoint is located on a planar face and the Z-coordinate is retrieved according to the proportional relation between +the plane equation expressed in the normalized camera frame (derived from the image coordinate) and the same plane +equation expressed in the camera frame, thanks to the known pose of the object. -In this example the learned data (the list of 3D coordinates and the corresponding descriptors) are saved in a file and will be used later in the detection part. +In this example the learned data (the list of 3D coordinates and the corresponding descriptors) are saved in a file +and will be used later in the detection part. \image html img-detection-step.jpeg Detection step. -In a query image where we want to detect the object, we find the matches between the keypoints detected in the current image with those previously learned. +In a query image where we want to detect the object, we find the matches between the keypoints detected in the current +image with those previously learned. The estimation of the pose of the object can then be computed with the 3D/2D information. The next section presents an example of the detection and the pose estimation of a teabox. \subsection detection_object_teabox_example Teabox detection and pose estimation -The following video shows the resulting detection and localization of a teabox that is learned on the first image of the video. 
+The following video shows the resulting detection and localization of a teabox that is learned on the first image of +the video. \htmlonly \endhtmlonly -The corresponding code is available in tutorial-detection-object-mbt.cpp. It contains the different steps to learn the teabox object on one image (the first image of the video) and then detect and get the pose of the teabox in the rest of the video. +The corresponding code is available in tutorial-detection-object-mbt.cpp. It contains the different steps to learn the +teabox object on one image (the first image of the video) and then detect and get the pose of the teabox in the rest of +the video. \include tutorial-detection-object-mbt.cpp -You may recognize with the following lines the code used in \ref tutorial-mb-edge-tracker.cpp to initialize the model-based tracker at a given pose and with the appropriate configuration. +You may recognize with the following lines the code used in \ref tutorial-mb-edge-tracker.cpp to initialize the +model-based tracker at a given pose and with the appropriate configuration. \snippet tutorial-detection-object-mbt.cpp MBT code The modifications made to the code start from now. -First, we have to choose about which type of keypoints will be used. SIFT keypoints are a widely type of keypoints used in computer vision, but depending of your version of OpenCV and due to some patents, certain types of keypoints will not be available. +First, we have to choose about which type of keypoints will be used. SIFT keypoints are a widely type of keypoints used +in computer vision, but depending of your version of OpenCV and due to some patents, certain types of keypoints will +not be available. Here, we will use SIFT if available, otherwise a combination of FAST keypoint detector and ORB descriptor extractor. \snippet tutorial-detection-object-mbt.cpp Keypoint selection @@ -67,7 +84,8 @@ The following line declares an instance of the vpKeyPoint class : \snippet tutorial-detection-object-mbt.cpp Keypoint declaration -You can load the configuration (type of detector, extractor, matcher, ransac pose estimation parameters) directly with an xml configuration file : +You can load the configuration (type of detector, extractor, matcher, ransac pose estimation parameters) directly with +an xml configuration file : \snippet tutorial-detection-object-mbt.cpp Keypoint xml config @@ -79,15 +97,19 @@ We then detect keypoints in the reference image with the object we want to learn \snippet tutorial-detection-object-mbt.cpp Keypoints reference detection -But we need to keep keypoints only on faces of the teabox. This is done by using the model-based tracker to first eliminate keypoints which do not belong to the teabox and secondly to have the plane equation for each faces (and so to be able to compute the 3D coordinate from the 2D information). +But we need to keep keypoints only on faces of the teabox. This is done by using the model-based tracker to first +eliminate keypoints which do not belong to the teabox and secondly to have the plane equation for each faces (and so to +be able to compute the 3D coordinate from the 2D information). \snippet tutorial-detection-object-mbt.cpp Keypoints selection on faces -The next step is the building of the reference keypoints. The descriptors for each keypoints are also extracted and the reference data consist of the lists of keypoints / descriptors and the list of 3D points. +The next step is the building of the reference keypoints. 
The descriptors for each keypoints are also extracted and the +reference data consist of the lists of keypoints / descriptors and the list of 3D points. \snippet tutorial-detection-object-mbt.cpp Keypoints build reference -We save the learning data in a binary format (the other possibilitie is to save in an xml format but which takes more space) to be able to use it later. +We save the learning data in a binary format (the other possibilitie is to save in an xml format but which takes more +space) to be able to use it later. \snippet tutorial-detection-object-mbt.cpp Save learning data @@ -95,7 +117,8 @@ We then visualize the result of the learning process by displaying with a cross \snippet tutorial-detection-object-mbt.cpp Display reference keypoints -We declare now another instance of the vpKeyPoint class dedicated this time to the detection of the teabox. The configuration is directly loaded from an xml file, otherwise this is done directly in the code. +We declare now another instance of the vpKeyPoint class dedicated this time to the detection of the teabox. The +configuration is directly loaded from an xml file, otherwise this is done directly in the code. \snippet tutorial-detection-object-mbt.cpp Init keypoint detection @@ -103,35 +126,50 @@ The previously saved binary file corresponding to the teabox learning data is lo \snippet tutorial-detection-object-mbt.cpp Load teabox learning data -We are now ready to detect the teabox in a query image. The call to the function vpKeyPoint::matchPoint() returns true if the matching was successful and permits to get the estimated homogeneous matrix corresponding to the pose of the object. The reprojection error is also computed. +We are now ready to detect the teabox in a query image. The call to the function vpKeyPoint::matchPoint() returns true +if the matching was successful and permits to get the estimated homogeneous matrix corresponding to the pose of the +object. The reprojection error is also computed. \snippet tutorial-detection-object-mbt.cpp Matching and pose estimation -In order to display the result, we use the tracker initialized at the estimated pose and we display also the location of the world frame: +In order to display the result, we use the tracker initialized at the estimated pose and we display also the location +of the world frame: \snippet tutorial-detection-object-mbt.cpp Tracker set pose \snippet tutorial-detection-object-mbt.cpp Display -The pose of the detected object can then be used to initialize a tracker automatically rather then using a human initialization; see \ref tutorial-tracking-mb-generic and \ref tutorial-tracking-tt. +The pose of the detected object can then be used to initialize a tracker automatically rather then using a human +initialization; see \ref tutorial-tracking-mb-generic and \ref tutorial-tracking-tt. \subsection detection_object_quick_explanation Quick explanation about some parameters used in the example -The content of the configuration file named detection-config-SIFT.xml and provided with this example is described in the following lines : +The content of the configuration file named detection-config-SIFT.xml and provided with this example is described in +the following lines: \include detection-config-SIFT.xml In this configuration file, SIFT keypoints are used. 
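To summarize the detection part described above, the call that matches the current image against the learned data and returns the pose could be sketched as follows; variable names are illustrative, vpKeyPoint::matchPoint() is the function described earlier and setPose() is the model-based tracker method used to display the result.

\code
vpHomogeneousMatrix cMo;                 // estimated pose of the teabox in the camera frame
double error = 0., elapsedTime = 0.;
if (keypoint_detection.matchPoint(I, cam, cMo, error, elapsedTime)) {
  // Detection succeeded: error is the reprojection error; cMo can be used to display
  // the model and the frame, or to (re-)initialize the model-based tracker
  tracker.setPose(I, cMo);
}
\endcode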
Let us explain now the configuration of the matcher: -- a brute force matching will explore all the possible solutions to match a considered keypoints detected in the current image to the closest (in descriptor distance term) one in the reference set, contrary to the other type of matching using the library FLANN (Fast Library for Approximate Nearest Neighbors) which contains some optimizations to reduce the complexity of the solution set, -- to eliminate some possible false matching, one technique consists of keeping only the keypoints whose are sufficienly discriminated using a ratio test. +- a brute force matching will explore all the possible solutions to match a considered keypoint detected in the + current image to the closest (in descriptor distance term) one in the reference set, contrary to the other type of + matching using the library FLANN (Fast Library for Approximate Nearest Neighbors) which contains some optimizations + to reduce the complexity of the solution set, +- to eliminate some possible false matches, one technique consists of keeping only the keypoints that are + sufficiently discriminated using a ratio test. Now, for the Ransac pose estimation part : -- two methods are provided to estimate the pose in a robust way: one using OpenCV, the other method uses a virtual visual servoing approach using ViSP, -- basically, a Ransac method is composed of two steps repeated a certain number of iterations: first we pick randomly 4 points and estimate the pose, the second step is to keep all points which sufficienly "agree" (the reprojection error is below a threshold) with the pose determinated in the first step. These points are inliers and form the consensus set, the other are outliers. -If enough points are in the consensus set (here 20 % of all the points), the pose is refined and returned, otherwise another iteration is made (here 200 iterations maximum). - -Below you will also find the content of detection-lconfig.xml configuration file, also provided in this example. It allows to use FAST detector and ORB extractor. +- two methods are provided to estimate the pose in a robust way: one using OpenCV, the other using a virtual + visual servoing approach provided by ViSP, +- basically, a Ransac method is composed of two steps repeated a certain number of iterations: first we pick randomly + 4 points and estimate the pose, the second step is to keep all points which sufficiently "agree" (the reprojection + error is below a threshold) with the pose determined in the first step. These points are inliers and form the + consensus set, the others are outliers. +If enough points are in the consensus set (here 20 % of all the points), the pose is refined and returned, otherwise +another iteration is made (here 200 iterations maximum). + +Below you will also find the content of the detection-lconfig.xml configuration file, also provided in this example. +It allows the use of the FAST detector and ORB extractor. \include detection-config.xml @@ -139,15 +177,18 @@ Below you will also find the content of detection-lconfig.xml configuration file \subsection detection_object_multiple_learning How to learn keypoints from multiple images -The following video shows an extension of the previous example where here we learn a cube from 3 images and then detect an localize the cube in all the images of the video. +The following video shows an extension of the previous example where we learn a cube from 3 images and then detect +and localize the cube in all the images of the video.
\htmlonly \endhtmlonly -The corresponding source code is given in tutorial-detection-object-mbt2.cpp. If you have a look on this file you will find the following. +The corresponding source code is given in tutorial-detection-object-mbt2.cpp. If you have a look on this file you will +find the following. -Before starting with the keypoints detection and learning part, we have to set the correct pose for the tracker using a predefined pose: +Before starting with the keypoints detection and learning part, we have to set the correct pose for the tracker using a +predefined pose: \snippet tutorial-detection-object-mbt2.cpp Set tracker pose @@ -155,9 +196,11 @@ One good thing to do is to refine the pose by running one iteration of the model \snippet tutorial-detection-object-mbt2.cpp Refine pose -The vpKeyPoint::buildReference() allows to append the current detected keypoints with those already present by setting the function parameter append to true. +The vpKeyPoint::buildReference() allows to append the current detected keypoints with those already present by setting +the function parameter append to true. -But before that, the same learning procedure must be done in order to train on multiple images. We detect keypoints on the desired image: +But before that, the same learning procedure must be done in order to train on multiple images. We detect keypoints on +the desired image: \snippet tutorial-detection-object-mbt2.cpp Keypoints reference detection @@ -165,21 +208,27 @@ Then, we keep only keypoints that are located on the object faces: \snippet tutorial-detection-object-mbt2.cpp Keypoints selection on faces -And finally, we build the reference keypoints and we set the flag append to true to say that we want to keep the previously learned keypoints: +And finally, we build the reference keypoints and we set the flag append to true to say that we want to keep the +previously learned keypoints: \snippet tutorial-detection-object-mbt2.cpp Keypoints build reference -\subsection detection_object_display_multiple_images How to display the matching when the learning is done on multiple images +\subsection detection_object_display_multiple_images How to display the matching when the learning is done on multiple +images -In this section we will explain how to display the matching between keypoints detected in the current image and their correspondences in the reference images that are used during the learning stage, as given in the next video: +In this section we will explain how to display the matching between keypoints detected in the current image and their +correspondences in the reference images that are used during the learning stage, as given in the next video: \htmlonly \endhtmlonly -\warning If you want to load the learning data from a file, you have to use a learning file that contains training images (with the parameter saveTrainingImages vpKeyPoint::saveLearningData() set to true when saving the file, by default it is). +\warning If you want to load the learning data from a file, you have to use a learning file that contains training +images (with the parameter saveTrainingImages vpKeyPoint::saveLearningData() set to true when saving the file, by +default it is). 
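A condensed sketch of the learning-from-several-images procedure described above is given below. The file names, identifiers and intermediate variables are assumptions, while vpKeyPoint::buildReference() with the append flag and a per-image identifier is the mechanism detailed in the next paragraphs.

\code
// Illustrative only: learn the cube from three training images.
std::vector<std::string> training_images = {"cube0001.png", "cube0002.png", "cube0003.png"};
for (size_t i = 0; i < training_images.size(); i++) {
  vpImageIo::read(I, training_images[i]);
  // 1. Set and refine the tracker pose for this image (see the snippets above)
  // 2. Detect keypoints, keep only those lying on visible faces and compute their 3D
  //    coordinates points3f (see the "Keypoints selection on faces" snippet)
  // 3. Append them to the reference (append = true); the last argument gives a unique
  //    identifier to the keypoints learned from this image
  keypoint_learning.buildReference(I, trainKeyPoints, points3f, true, static_cast<int>(i + 1));
}
\endcode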
-Before showing how to display the matching for all the training images, we have to attribute an unique identifier (a positive integer) for the set of keypoints learned for a particular image during the training process: +Before showing how to display the matching for all the training images, we have to attribute an unique identifier +(a positive integer) for the set of keypoints learned for a particular image during the training process: \snippet tutorial-detection-object-mbt2.cpp Keypoints build reference @@ -211,11 +260,13 @@ The following code shows how to retrieve the RANSAC inliers and outliers: \snippet tutorial-detection-object-mbt2.cpp Get RANSAC inliers outliers -Finally, we can also display the model in the matching image. For that, we have to modify the principal point offset of the intrinsic parameter. +Finally, we can also display the model in the matching image. For that, we have to modify the principal point offset +of the intrinsic parameter. This is more or less an hack as you have to manually change the principal point coordinate to make it works. \snippet tutorial-detection-object-mbt2.cpp Display model image matching -\note You can refer to the full code in the section \ref detection_object_multiple_learning to have an example of how to learn from multiple images and how to display all the matching. +\note You can refer to the full code in the section \ref detection_object_multiple_learning to have an example of how +to learn from multiple images and how to display all the matching. */ diff --git a/doc/tutorial/detection/tutorial-matching.dox b/doc/tutorial/detection/tutorial-matching.dox index bc2121d755..99831c4aa2 100644 --- a/doc/tutorial/detection/tutorial-matching.dox +++ b/doc/tutorial/detection/tutorial-matching.dox @@ -5,17 +5,18 @@ \section intro_matching Introduction -This tutorial focuses on keypoints detection and matching. You will learn how to detect keypoints on a reference image considered here as the first image of an mpeg video. Then in the next images of the video, keypoints that match those detected in the reference image are displayed. To leverage keypoints detection and matching capabilities ViSP should be build with OpenCV 3rd party. +This tutorial focuses on keypoints detection and matching. You will learn how to detect keypoints on a reference image +considered here as the first image of an mpeg video. Then in the next images of the video, keypoints that match those +detected in the reference image are displayed. To leverage keypoints detection and matching capabilities ViSP should be +build with OpenCV 3rd party. -Note that all the material (source code and video) described in this tutorial is part of ViSP source code and could be downloaded using the following command: +Note that all the material (source code and image) described in this tutorial is part of ViSP source code +(in `tutorial/detection/matching` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/detection/matching. -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/matching -\endcode +\note This tutorial is adapted if your OpenCV version is equal or greater than 2.1.1 version. -\note This tutorial is adapted if your OpenCV version is equal or greater than 2.1.1 version. - -\note We assume that you are familiar with video framegrabbing described in \ref tutorial-grabber and with the way to display an image in a window described in \ref tutorial-getting-started. 
+\note We assume that you are familiar with video framegrabbing described in \ref tutorial-grabber and with the way to +display an image in a window described in \ref tutorial-getting-started. \section orb_keypoints ORB keypoints detection and matching @@ -23,7 +24,8 @@ Let us consider the following source code also available in tutorial-matching-ke \include tutorial-matching-keypoint.cpp -Here after is the resulting video. The left image represents the reference image. The right images correspond to the successive images of the input video. All the green lines extremities represent the points that are matched. +Here after is the resulting video. The left image represents the reference image. The right images correspond to the +successive images of the input video. All the green lines extremities represent the points that are matched. \htmlonly @@ -34,28 +36,35 @@ Now, let us explain the lines dedicated to the ORB keypoint detection and matchi First we have to include the header of the vpKeyPoint class that is a wrapper over OpenCV classes. \snippet tutorial-matching-keypoint.cpp Include -Note that this class is only available if ViSP was build with OpenCV. This is ensured by the check of VISP_HAVE_OPENCV_VERSION macro. +Note that this class is only available if ViSP was build with OpenCV. This is ensured by the check of +`VISP_HAVE_OPENCV_VERSION` macro. \snippet tutorial-matching-keypoint.cpp Define -Then we open the mpeg video stream and grab the first image of the video that is stored in \c I container. The vpKeyPoint class is instantiated and keypoints are detected on the first image which is considered as the reference image: +Then we open the mpeg video stream and grab the first image of the video that is stored in \c I container. The +vpKeyPoint class is instantiated and keypoints are detected on the first image which is considered as the reference +image: \snippet tutorial-matching-keypoint.cpp Construction -The next lines are used to create image \c Idisp to render the matching results; left image for the reference image, right image for the current image that is processed: +The next lines are used to create image \c Idisp to render the matching results; left image for the reference image, +right image for the current image that is processed: \snippet tutorial-matching-keypoint.cpp Create image Then a display using OpenCV is created and image \c Idisp is rendered: \snippet tutorial-matching-keypoint.cpp Init display -We enter then in the \c while() loop where a new image is acquired from the video stream and inserted in the right part of image \c Idisp dedicated to rendering of the matching results. +We enter then in the \c while() loop where a new image is acquired from the video stream and inserted in the right +part of image \c Idisp dedicated to rendering of the matching results. 
\snippet tutorial-matching-keypoint.cpp Acquisition -We start the rendering by displaying the rendered image and by drawing a white vertical line to separate the reference image from the current one: +We start the rendering by displaying the rendered image and by drawing a white vertical line to separate the reference +image from the current one: \snippet tutorial-matching-keypoint.cpp Display Keypoint matches between the reference image and the current image \c I are detected using: \snippet tutorial-matching-keypoint.cpp Matching -Then we parse all the matches to retrieve the coordinates of the points in the reference image (in \c iPref variable) and in the current image (in \c iPcur variable): +Then we parse all the matches to retrieve the coordinates of the points in the reference image (in \c iPref variable) +and in the current image (in \c iPcur variable): \snippet tutorial-matching-keypoint.cpp Get matches Next we draw green lines between the matched points: @@ -66,18 +75,23 @@ At the end of the iteration, we flush all the previous display to the render win \section other_keypoints Using others types of keypoints -Using other types of detectors / descriptors (SIFT, SURF, etc.) or matchers (Brute Force, Flann based matcher) is also possible. This can be easily done by using the correct OpenCV identifier name. For example, if you want to use the couple of detector / descriptor SIFT with a matching using FLANN, you only have to change the following lines : +Using other types of detectors / descriptors (SIFT, SURF, etc.) or matchers (Brute Force, Flann based matcher) is also +possible. This can be easily done by using the correct OpenCV identifier name. For example, if you want to use the +couple of detector / descriptor SIFT with a matching using FLANN, you only have to change the following lines : \snippet tutorial-matching-keypoint-SIFT.cpp Construction A complete example is given in tutorial-matching-keypoint-SIFT.cpp -Available types of detectors, extractors or matchers depend on OpenCV version. Check the OpenCV documentation to know which ones are available. +Available types of detectors, extractors or matchers depend on OpenCV version. Check the OpenCV documentation to know +which ones are available. -Due to some patents, SIFT and SURF keypoints are available in a separate module in OpenCV: in nonfree module in OpenCV version before 3.0.0 and in xfeatures2d in OpenCV version from 3.0.0 if OpenCV contrib modules are build. If you want to use them, be sure that you have the nonfree module or xfeatures2d module. +Due to some patents, SIFT and SURF keypoints are available in a separate module in OpenCV: in nonfree module in OpenCV +version before 3.0.0 and in xfeatures2d in OpenCV version from 3.0.0 if OpenCV contrib modules are build. If you want +to use them, be sure that you have the nonfree module or xfeatures2d module. Some usage restrictions could also exist according to the usage you plan (e.g. research or commercial use). -You can now follow \ref tutorial-homography to see how to exploit couple of matched points in order to estimate an homography that allows to track the position of an object. +You can now follow \ref tutorial-homography to see how to exploit couple of matched points in order to estimate an +homography that allows to track the position of an object. 
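As a recap of this tutorial, the main matching loop could be condensed as in the sketch below. It assumes that the video reader, the images \c I and \c Idisp, the display and the vpKeyPoint object have been created as shown in the snippets above; it is a simplified illustration, not a replacement for tutorial-matching-keypoint.cpp.

\code
while (!reader.end()) {
  reader.acquire(I);
  // Insert the current image in the right half of the side-by-side rendering image
  Idisp.insert(I, vpImagePoint(0, I.getWidth()));
  vpDisplay::display(Idisp);
  // White vertical line separating the reference image from the current one
  vpDisplay::displayLine(Idisp, vpImagePoint(0, I.getWidth()),
                         vpImagePoint(I.getHeight(), I.getWidth()), vpColor::white, 2);

  unsigned int nbMatch = keypoint.matchPoint(I);
  vpImagePoint iPref, iPcur;
  for (unsigned int i = 0; i < nbMatch; i++) {
    keypoint.getMatchedPoints(i, iPref, iPcur);
    // Shift the current point to the right half before drawing the green line
    vpDisplay::displayLine(Idisp, iPref, iPcur + vpImagePoint(0, I.getWidth()), vpColor::green);
  }
  vpDisplay::flush(Idisp);
}
\endcode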
*/ - diff --git a/doc/tutorial/detection_dnn/tutorial-detection-dnn.dox b/doc/tutorial/detection_dnn/tutorial-detection-dnn.dox index 45978abd12..0ab2a417f1 100644 --- a/doc/tutorial/detection_dnn/tutorial-detection-dnn.dox +++ b/doc/tutorial/detection_dnn/tutorial-detection-dnn.dox @@ -7,28 +7,27 @@ This tutorial shows how to use the `vpDetectorDNNOpenCV` class (DNN stands for Deep Neural Network), which is a wrapper over the OpenCV DNN module. -The `vpDetectorDNNOpenCV` class provides convenient ways to perform image classification and to retrieve detection bounding boxes, +The `vpDetectorDNNOpenCV` class provides convenient ways to perform image classification and to retrieve detection +bounding boxes, class ids and confidence values of a single or of multiple classes. For other tasks such as image segmentation or more complicated uses, you should use directly the OpenCV DNN API. -This class supports `Faster-RCNN`, `SSD-MobileNet`, `ResNet 10`, `Yolo v3`, `Yolo v4`, `Yolo v5`, `Yolo v7` and `Yolo v8` convolutional networks -that simultaneously predict object boundaries and prediction scores at each position. -If you want to use another type of network, you can define your own parsing method of the DNN detection results and give it to -the `vpDetectorDNNOpenCV` object. +This class supports `Faster-RCNN`, `SSD-MobileNet`, `ResNet 10`, `Yolo v3`, `Yolo v4`, `Yolo v5`, `Yolo v7` and +`Yolo v8` convolutional networks that simultaneously predict object boundaries and prediction scores at each position. +If you want to use another type of network, you can define your own parsing method of the DNN detection results and +give it to the `vpDetectorDNNOpenCV` object. -This class can be initialized from a JSON file if ViSP has been compiled with NLOHMANN JSON (see \ref soft_tool_json to see how to do it). +This class can be initialized from a JSON file if ViSP has been compiled with NLOHMANN JSON (see \ref soft_tool_json to +see how to do it). Examples of such JSON files can be found in the tutorial folder. -In the next section you will find an example that shows how to perform face detection in a single image or in images acquired from +In the next section you will find an example that shows how to perform face detection in a single image or in images +acquired from a camera connected to your computer. -Note that all the material (source code and network model) described in this tutorial is part of ViSP source code and could be -downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/dnn -\endcode +Note that all the material (source code and network model) described in this tutorial is part of ViSP source code +(in `tutorial/detection/dnn` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/detection/dnn. \section dnn_requirements Requirements @@ -55,7 +54,8 @@ or using the [nvidia-smi tool](https://developer.nvidia.com/nvidia-system-manage $ apt list --installed | grep -i opencv ``` - If this command does not return an empty line, please run (**if you are sure that it is not required by another software installed on your computer**): + If this command does not return an empty line, please run (**if you are sure that it is not required by another + software installed on your computer**): ``` $ sudo apt remove libopencv-dev ``` @@ -74,7 +74,8 @@ or using the [nvidia-smi tool](https://developer.nvidia.com/nvidia-system-manage libx11-dev ``` -6. Get the sources. 
The \b vpDetectorDNNOpenCV has been tested with **OpenCV 4.7**. First, get the OpenCV_contrib sources, that contain the Cuda DNN module. +6. Get the sources. The \b vpDetectorDNNOpenCV has been tested with **OpenCV 4.7**. First, get the OpenCV_contrib +sources, that contain the Cuda DNN module. On a Debian distribution, you would run: ``` $ cd ${HOME}/visp_ws/3rdparty/ @@ -150,8 +151,8 @@ on DNN models learned from the following networks: - Yolo v7 - Yolo v8 -It uses video capture capability from OpenCV to capture images from a camera and detect objects using a DNN model learned using -one of the previous networks. +It uses video capture capability from OpenCV to capture images from a camera and detect objects using a DNN model +learned using one of the previous networks. \include tutorial-dnn-object-detection-live.cpp @@ -159,19 +160,21 @@ Default DNN model and config files perform human faces detection. \snippet tutorial-dnn-object-detection-live.cpp OpenCV DNN face detector -This network is provided by OpenCV and has been trained -with the following characteristics: +This network is provided by OpenCV +and has been trained with the following characteristics:

This is a brief description of training process which has been used to get res10_300x300_ssd_iter_140000.caffemodel. -The model was created with SSD framework using ResNet-10 like architecture as a backbone. Channels count in ResNet-10 convolution layers was -significantly dropped (2x- or 4x- fewer channels). The model was trained in Caffe framework on some huge and available online dataset. +The model was created with SSD framework using ResNet-10 like architecture as a backbone. Channels count in ResNet-10 +convolution layers was significantly dropped (2x- or 4x- fewer channels). The model was trained in Caffe framework on +some huge and available online dataset.
-More specifically, the model used (`opencv_face_detector_uint8.pb`) has been quantized (with the TensorFlow library) on 8-bit unsigned int to -reduce the size of the training model (2.7 mo vs 10.7 mo for `res10_300x300_ssd_iter_140000.caffemodel`). +More specifically, the model used (`opencv_face_detector_uint8.pb`) has been quantized (with the TensorFlow library) +on 8-bit unsigned int to reduce the size of the training model (2.7 mo vs 10.7 mo for +`res10_300x300_ssd_iter_140000.caffemodel`). The following lines permit to create the DNN object detector: @@ -179,25 +182,32 @@ The following lines permit to create the DNN object detector: To construct `netConfig` object some configuration parameters of the DNN are required: - `confThresh`, which is the confidence threshold used to filter the detections after inference -- `nmsThresh`, which is the Non-Maximum Threshold used to filter multiple detections that can occur approximatively at the same locations +- `nmsThresh`, which is the Non-Maximum Threshold used to filter multiple detections that can occur approximatively at + the same locations - `labelFile`, which is the path towards the file containing the list of classes the DNN can detect -- `inputWidth` and `inputHeight`, which are the dimensions to resize the input image into the blob that is fed in entry of the network -- `filterThresh`, which is a double that, if greater than 0., indicates that the user wants to perform an additional filtering on the detection outputs based on the - size of these detections +- `inputWidth` and `inputHeight`, which are the dimensions to resize the input image into the blob that is fed in entry + of the network +- `filterThresh`, which is a double that, if greater than 0., indicates that the user wants to perform an additional + filtering on the detection outputs based on the size of these detections - `meanR`, `meanG` and `meanB` are the values used for mean subtraction - `scaleFactor` is used to normalize the data range -- `swapRB` should be set to `true` when the model has been trained on RGB data. Since OpenCV used the BGR convention, R and B channel should be swapped -- `dnn_type` is the type of parsing method to use to parse the DNN raw results. See vpDetectorDNNOpenCV::DNNResultsParsingType to determine - which parsing methods are available -- `model` is the network trained weights, `config` is the network topology description and `framework` is the weights framework. +- `swapRB` should be set to `true` when the model has been trained on RGB data. Since OpenCV used the BGR convention, + R and B channel should be swapped +- `dnn_type` is the type of parsing method to use to parse the DNN raw results. See + vpDetectorDNNOpenCV::DNNResultsParsingType to determine which parsing methods are available +- `model` is the network trained weights, `config` is the network topology description and `framework` is the weights + framework. -Alternatively, if ViSP has been compiled with the NLOHMANN JSON library, one can initialize the `vpDetectorDNNOpenCV` object using the following method: +Alternatively, if ViSP has been compiled with the NLOHMANN JSON library, one can initialize the `vpDetectorDNNOpenCV` +object using the following method: \snippet tutorial-dnn-object-detection-live.cpp DNN json -You can directly refer to the OpenCV model zoo for the parameters values. +You can directly refer to the OpenCV model zoo +for the parameters values. 
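To give an order of magnitude for these parameters, the sketch below lists plausible values for the default face-detection configuration. All numbers, identifiers and file names are assumptions for illustration (they correspond to the values commonly used with the OpenCV res10 SSD face detector) and are not guaranteed to match the tutorial defaults.

\code
// Illustrative values only (assumed, not copied from the tutorial sources)
float confThresh = 0.5f;                  // keep detections with at least 50 % confidence
float nmsThresh = 0.4f;                   // Non-Maximum Suppression threshold
std::string labelFile = "class.txt";      // one class name per line, here a single "face" class
int inputWidth = 300, inputHeight = 300;  // blob size fed to the network
double filterThresh = -0.25;              // <= 0: no additional size-based filtering
double meanR = 104., meanG = 177., meanB = 123.;  // mean subtraction values
double scaleFactor = 1.0;                 // no rescaling of the pixel values
bool swapRB = false;                      // the face detection model expects BGR data
std::string dnn_type = "resnet-10";       // assumed identifier, see vpDetectorDNNOpenCV::DNNResultsParsingType
std::string model = "opencv_face_detector_uint8.pb";  // trained weights (quantized TensorFlow model)
std::string config = "opencv_face_detector.pbtxt";    // network topology description
std::string framework = "tensorflow";
\endcode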
-After setting the correct parameters, if you want to get the data as a map, where the keys will be the class names (or ID if no label file was given), +After setting the correct parameters, if you want to get the data as a map, where the keys will be the class names +(or ID if no label file was given), you can easily detect object in an image with: \snippet tutorial-dnn-object-detection-live.cpp DNN object detection map mode @@ -218,7 +228,8 @@ or for a non-sorted vector with: \subsection dnn_usecase_general Generic usage -The default behavior is to detect human faces, but you can input another model to detect the objects you want. To see which are the options, run: +The default behavior is to detect human faces, but you can input another model to detect the objects you want. To see +which are the options, run: ``` $ cd $VISP_WS/visp-build/tutorial/detection/dnn $ ./tutorial-dnn-object-detection-live --help @@ -226,7 +237,8 @@ $ ./tutorial-dnn-object-detection-live --help \subsection dnn_usecase_face_detection Face detection -The default behavior is to detect human faces using a model provided by OpenCV and learned over a ResNet 10 network. If you have a laptop, simply run: +The default behavior is to detect human faces using a model provided by OpenCV and learned over a ResNet 10 network. +If you have a laptop, simply run: ``` $ cd $VISP_WS/visp-build/tutorial/detection/dnn $ ./tutorial-dnn-object-detection-live @@ -247,8 +259,10 @@ $ ./tutorial-dnn-object-detection-live --model $MODEL --labels $LABELS --config \subsection dnn_models_coco COCO dataset objects detection -[COCO](https://cocodataset.org) is a large-scale object detection, segmentation, and captioning dataset. It contains over 330 000 images, each annotated with 80 object categories. -In the following sections, we show how to use the DNN models learned with the different networks, to detect objects among the list of 80 objects in the COCO dataset. +[COCO](https://cocodataset.org) is a large-scale object detection, segmentation, and captioning dataset. It contains +over 330 000 images, each annotated with 80 object categories. +In the following sections, we show how to use the DNN models learned with the different networks, to detect objects +among the list of 80 objects in the COCO dataset. \subsubsection dnn_supported_faster_rcnn Faster-RCNN @@ -425,10 +439,11 @@ If you want to train your own YoloV5 model, please refer to the \subsubsection dnn_supported_yolov7 Yolo v7 -To be able to use `YoloV7` with the class `vpDetectorDNNOpenCV`, you must first download the weights (`yolov7-tiny.pt`) in the Pytorch format from -[here](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt). +To be able to use `YoloV7` with the class `vpDetectorDNNOpenCV`, you must first download the weights (`yolov7-tiny.pt`) +in the Pytorch format from [here](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt). 
-Then, convert it in ONNX format using the `export.py` script that you can find on the [YoloV7 repo](https://github.com/WongKinYiu/yolov7) with the following arguments: +Then, convert it in ONNX format using the `export.py` script that you can find on the +[YoloV7 repo](https://github.com/WongKinYiu/yolov7) with the following arguments: ``` $ python3 export.py --weights ../weights/yolov7-tiny.pt --grid --simplify --topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640 --max-wh 640 ``` diff --git a/doc/tutorial/detection_dnn/tutorial-detection-tensorrt.dox b/doc/tutorial/detection_dnn/tutorial-detection-tensorrt.dox index d669bc8703..aff7c62955 100644 --- a/doc/tutorial/detection_dnn/tutorial-detection-tensorrt.dox +++ b/doc/tutorial/detection_dnn/tutorial-detection-tensorrt.dox @@ -4,16 +4,15 @@ \tableofcontents \section dnn_trt_intro Introduction -This tutorial shows how to run object detection inference using NVIDIA TensorRT inference SDK. +This tutorial shows how to run object detection inference using NVIDIA +TensorRT inference SDK. -For this tutorial, you'll need `ssd_mobilenet.onnx` pre-trained model, and `pascal-voc-labels.txt` label's file containing the corresponding labels. +For this tutorial, you'll need `ssd_mobilenet.onnx` pre-trained model, and `pascal-voc-labels.txt` label's file +containing the corresponding labels. These files can be found in visp-images dataset. -Note that the source code described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/dnn -\endcode +Note that all the material (source code and network mode) described in this tutorial is part of ViSP source code +(in `tutorial/detection/dnn` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/detection/dnn. Before running this tutorial, you need to install: - CUDA (version 10.2 or higher) @@ -23,7 +22,8 @@ Before running this tutorial, you need to install: Installation instructions are provided in \ref dnn_trt_prereq section. -The tutorial was tested on multiple hardwares of NVIDIA. The following table details the versions of CUDA and TensorRT used for each GPU: +The tutorial was tested on multiple hardwares of NVIDIA. The following table details the versions of CUDA and TensorRT +used for each GPU: | NVIDIA hardware | OS | CUDA | TensorRT | CuDNN | | ------------- | ------------- | ------------- | ------------- | ------------- | @@ -31,7 +31,8 @@ The tutorial was tested on multiple hardwares of NVIDIA. The following table det | GeForce GTX 1080 | Ubuntu 16.04 | 11.0 | 8.0 GA | 8.0 | | Quadro RTX 6000 | Ubuntu 18.04 | 11.3 | 8.0 GA Update 1 | 8.2 | -\note Issues were encountered when using TensorRT 8.2 EA with CUDA 11.3 on NVIDIA Quadro RTX 6000, the tutorial didn't work as expected. There were plenty of bounding boxes in any given image. +\note Issues were encountered when using TensorRT 8.2 EA with CUDA 11.3 on NVIDIA Quadro RTX 6000, the tutorial didn't +work as expected. There were plenty of bounding boxes in any given image. \section dnn_trt_prereq Prerequisites \subsection dnn_trt_cuda_install Install CUDA @@ -52,7 +53,7 @@ $ cat /usr/local/cuda/version.{txt,json} "version" : "11.3.20210326" }, \endcode -Here it shows that CUDA toolkit 11.3 is installed. +Here it shows that CUDA toolkit 11.3 is installed. \note We recommend that NVidia CUDA Driver and CUDA Toolkit have the same version. 
- To install NVidia CUDA Driver and Toolkit on your machine, please follow this step-by-step guide. @@ -70,7 +71,8 @@ $ sudo dpkg -i libcudnn8_8.2.0.53-1+cuda11.3_amd64.deb TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs. To download and install TensorRT, please follow this step-by-step guide. -Let us consider the installation of `TensorRT 8.0 GA Update 1 for x86_64 Architecture`. In that case you need to download "TensorRT 8.0 GA Update 1 for Linux x86_64 and CUDA 11.0, CUDA 11.1, CUDA 11.2, 11.3" TAR Package and extract its content in `VISP_WS`. +Let us consider the installation of `TensorRT 8.0 GA Update 1 for x86_64 Architecture`. In that case you need to +download "TensorRT 8.0 GA Update 1 for Linux x86_64 and CUDA 11.0, CUDA 11.1, CUDA 11.2, 11.3" TAR Package and extract its content in `VISP_WS`. ``` $ ls $VISP_WS TensorRT-8.0.3.4 ... @@ -103,7 +105,8 @@ $ python3 -m pip install onnx_graphsurgeon-0.3.10-py2.py3-none-any.whl ``` \subsection dnn_trt_opencv_install Install OpenCV from source -To be able to run the tutorial, you should install OpenCV from source, since some extra modules are required (`cudev`, `cudaarithm` and `cudawarping` are not included in `libopencv-contrib-dev` package). +To be able to run the tutorial, you should install OpenCV from source, since some extra modules are required +(`cudev`, `cudaarithm` and `cudawarping` are not included in `libopencv-contrib-dev` package). To do so, proceed as follows: - In `VISP_WS`, clone `opencv` and `opencv_contrib` repos: @@ -127,7 +130,8 @@ $ cmake -DOPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \ -DBUILD_opencv_cudawarping=ON \ -DCMAKE_INSTALL_PREFIX=$VISP_WS/opencv/install ../ \endcode -Note here that installation folder is set to `$VISP_WS/opencv/install` instead of the default `/usr/local`. This allows to preserve any other existing OpenCV installation on your machine. +Note here that installation folder is set to `$VISP_WS/opencv/install` instead of the default `/usr/local`. +This allows to preserve any other existing OpenCV installation on your machine. - Note that if you want a more advanced way to configure the build process, you can use `ccmake`: \code @@ -143,7 +147,8 @@ $ grep cudaarithm version_string.tmp $ grep cudawarping version_string.tmp " To be built: ... cudawarping ... \endverbatim -If this is not the case, it means that something is wrong, either in CUDA installation, either in OpenCV configuration with `cmake`. +If this is not the case, it means that something is wrong, either in CUDA installation, either in OpenCV configuration +with `cmake`. - Launch build process: \code @@ -158,8 +163,9 @@ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$VISP_WS/opencv/install/lib \section dnn_trt_visp Build ViSP with TensorRT support -Next step is here to build ViSP from source enabling TensorRT support. -As described in \ref install_ubuntu_visp_get_source, we suppose here that you have ViSP source code in ViSP workspace folder: `$VISP_WS`. +Next step is here to build ViSP from source enabling TensorRT support. +As described in \ref install_ubuntu_visp_get_source, we suppose here that you have ViSP source code in ViSP workspace +folder: `$VISP_WS`. If you follow \ref dnn_trt_prereq, you should also find TensorRT and OpenCV in the same workspace. 
\code @@ -167,7 +173,8 @@ $ ls $VISP_WS visp opencv TensorRT-8.0.3.4 \endcode -Now to ensure that ViSP is build TensorRT, create and enter build folder before configuring ViSP with TensorRT and OpenCV path +Now to ensure that ViSP is build TensorRT, create and enter build folder before configuring ViSP with TensorRT and +OpenCV path \code $ mkdir visp-build; cd visp-build $ cmake ../visp \ @@ -176,7 +183,8 @@ $ cmake ../visp \ \endcode \section dnn_trt_example Tutorial description -In the following section is a detailed description of the tutorial. The complete source code is available in tutorial-dnn-tensorrt-live.cpp file. +In the following section is a detailed description of the tutorial. The complete source code is available in +tutorial-dnn-tensorrt-live.cpp file. \subsection header_files Include header files Include header files for required extra modules to handle CUDA. @@ -192,7 +200,8 @@ Include TensorRT header files. \subsection preprocessing Pre-processing Prepare input image for inference with OpenCV. -First, upload image to GPU, resize it to match model's input dimensions, normalize with `meanR` `meanG` `meanB` being the values used for mean substraction. +First, upload image to GPU, resize it to match model's input dimensions, normalize with `meanR` `meanG` `meanB` being +the values used for mean substraction. Transform data to tensor (copy data to channel by channel to `gpu_input`). In the case of `ssd_mobilenet.onnx`, the input dimension is 1x3x300x300. \snippet tutorial-dnn-tensorrt-live.cpp Preprocess image @@ -206,9 +215,11 @@ In the case of `ssd_mobilenet.onnx`, there is 2 outputs: In fact, the model will output 3000 guesses of boxes (bounding boxes) with 21 scores each (1 score for each class). The result of the inference being on the GPU, we should first proceed by copying it to the CPU. -Post processing consists of filtering the predictions where we're not sure about the class detected and then merging multiple detections that can occur approximately at the same locations. +Post processing consists of filtering the predictions where we're not sure about the class detected and then merging +multiple detections that can occur approximately at the same locations. `confThresh` is the confidence threshold used to filter the detections after inference. -`nmsThresh` is the Non-Maximum Threshold. It is used to merge multiple detections being in the same location approximately. +`nmsThresh` is the Non-Maximum Threshold. It is used to merge multiple detections being in the same location +approximately. \snippet tutorial-dnn-tensorrt-live.cpp PostProcess results \subsection parseOnnx Parse ONNX Model @@ -221,13 +232,16 @@ Parse ONNX model. `context` is used for executing inference. To parse ONNX model, we should first proceed by initializing TensorRT **Context** and **Engine**. -To do this, we should create an instance of **Builder**. With **Builder**, we can create **Network** that can create the **Parser**. +To do this, we should create an instance of **Builder**. With **Builder**, we can create **Network** that can create +the **Parser**. -If we already have the GPU inference engine loaded once, it will be serialized and saved in a cache file (with .engine extension). In this case, -the engine file will be loaded, then inference runtime created, engine and context loaded. +If we already have the GPU inference engine loaded once, it will be serialized and saved in a cache file +(with .engine extension). 
In this case, the engine file will be loaded, then inference runtime created, engine and +context loaded. \snippet tutorial-dnn-tensorrt-live.cpp ParseOnnxModel engine exists -Otherwise, we should parse the ONNX model (for the first time only), create an instance of builder. The builder can be configured to select the amount of GPU memory to be used for tactic selection or FP16/INT8 modes. +Otherwise, we should parse the ONNX model (for the first time only), create an instance of builder. The builder can be +configured to select the amount of GPU memory to be used for tactic selection or FP16/INT8 modes. Create **engine** and **context** to be used in the main pipeline, and serialize and save the engine for later use. \snippet tutorial-dnn-tensorrt-live.cpp ParseOnnxModel engine does not exist @@ -250,8 +264,10 @@ Create a grabber to retrieve image from webcam (or external camera) or read imag \snippet tutorial-dnn-tensorrt-live.cpp Main loop \section tutorial_usage Usage -To use this tutorial, you need an USB webcam and you should have downloaded an **onnx** file of a model with its corresponding labels in *txt* file format. -To start, you may download the **ssd_mobilenet.onnx** model and **pascal-voc-labels.txt** file from here or install \ref install_ubuntu_dataset cloning Github repository. +To use this tutorial, you need an USB webcam and you should have downloaded an **onnx** file of a model with its +corresponding labels in *txt* file format. To start, you may download the **ssd_mobilenet.onnx** model and +**pascal-voc-labels.txt** file from here or install +\ref install_ubuntu_dataset cloning Github repository. To see the options, run: \code diff --git a/doc/tutorial/docker/tutorial-install-docker.dox b/doc/tutorial/docker/tutorial-install-docker.dox index 114cbfa491..e7ced95c6f 100644 --- a/doc/tutorial/docker/tutorial-install-docker.dox +++ b/doc/tutorial/docker/tutorial-install-docker.dox @@ -5,15 +5,19 @@ \section install_docker_intro Introduction -Even with the detailed and tested \ref tutorial_install_src tutorials, sometimes it is tough for people to get a system up and running with ViSP. That's why in this tutorial we explain how to get ready to use Docker images that contains ViSP source code with already build tests, examples and tutorials that you may just run to discover ViSP capabilities. +Even with the detailed and tested \ref tutorial_install_src tutorials, sometimes it is tough for people to get a system +up and running with ViSP. That's why in this tutorial we explain how to get ready to use Docker images that contains +ViSP source code with already build tests, examples and tutorials that you may just run to discover ViSP capabilities. -Our Docker images available on [Docker Hub](https://hub.docker.com/repository/docker/vispci/vispci) are around 1 GB in size (compressed size) and contain minimal things to discover and start to play with ViSP. +Our Docker images available on [Docker Hub](https://hub.docker.com/repository/docker/vispci/vispci) are around 1 GB in +size (compressed size) and contain minimal things to discover and start to play with ViSP. 
-\section install_docker_engine Install Docker Engine +\section install_docker_engine Prerequisites: Install Docker Engine \subsection install_docker_engine_ubuntu On Ubuntu host -As mentioned [here](https://docs.docker.com/engine/install/ubuntu/), to install Docker Engine on Ubuntu host using the repository, follow the instructions: +As mentioned [here](https://docs.docker.com/engine/install/ubuntu/), to install Docker Engine on Ubuntu host using the +repository, follow the instructions: - Update the apt package index and install packages to allow apt to use a repository over HTTPS: \verbatim @@ -35,9 +39,9 @@ $ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - - Use the following command to set up the stable repository \verbatim $ sudo add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" \endverbatim - Update the apt package index, and install the latest version of Docker Engine and containerd @@ -46,161 +50,174 @@ $ sudo apt-get update $ sudo apt-get install docker-ce docker-ce-cli containerd.io \endverbatim - \subsection install_docker_engine_mac On MacOS host -- To install Docker Engine on MacOS host, first go to the [Docker Store](https://hub.docker.com/editions/community/docker-ce-desktop-mac) and download Docker Desktop for Mac. +- To install Docker Engine on MacOS host, first go to the + [Docker Store](https://hub.docker.com/editions/community/docker-ce-desktop-mac) and download Docker Desktop for Mac. - Double-click Docker.dmg to open the installer, then drag Moby the whale to the Applications folder. - Double-click Docker.app in the Applications folder to start Docker. -- You are prompted to authorize Docker.app with your system password after you launch it. Privileged access is needed to install networking components and links to the Docker apps. +- You are prompted to authorize Docker.app with your system password after you launch it. Privileged access is needed + to install networking components and links to the Docker apps. -\section install_docker_visp Install Docker ViSP images +- In order to run GUIs from the container, you will need to: + - Install [XQuartz](https://www.xquartz.org/) 2.7.11 or later + - After installing XQuartz, start it and open XQuartz -> Preferences from the menu bar. Go to the last tab, Security, + and enable both "Allow connections from network clients" and "Authenticate connections" checkboxes and restart XQuartz. + \image html img-xquartz-security.jpg + - Now your Mac will be listening on port 6000 for X11 connections. Record the IP Address of your Mac as you will need + it in your containers. + - Restart X11 server or reboot your Mac to take into account previous changes -In [Docker Hub](https://hub.docker.com/repository/docker/vispci/vispci) we provide various ready to use Docker images with ViSP already built that could be used on an Ubuntu or MacOS host. +\section docker_visp_pull Pull existing docker image -\subsection install_docker_visp_ubuntu_18_04 Ubuntu 18.04 image +In [Docker Hub](https://hub.docker.com/repository/docker/vispci/vispci), we provide several ready-to-use Docker +images that can be used on an Ubuntu or macOS host with ViSP already built. Instead, there is also the possibility to +build docker images from a `Dockerfile` following instruction given in \ref docker_visp_build section. 
-\note The `Dockerfile` used to generate this image is available [here](https://github.com/lagadic/visp/blob/master/ci/docker/ubuntu-18.04/Dockerfile). +\subsection docker_visp_pull_ubuntu_18_04 Pull Ubuntu 18.04 image -To use the docker image installed on an Ubuntu 18.04 system, use the following instructions: \verbatim $ docker pull vispci/vispci:ubuntu-18.04 -$ docker run -it vispci/vispci:ubuntu-18.04 -\endverbatim - -Within the container, ViSP workspace is installed in `$HOME/visp-ws` folder: -\verbatim -[root@7152a1ec64ba]# ls $HOME/visp-ws -visp visp-build visp-images \endverbatim -- In `visp` folder you will find a [Github](https://github.com/lagadic/visp) clone of the source code. To update its content with the last changes, run `cd visp; git pull` -- In `visp-build` folder you will find ViSP libraries and all the build binaries corresponding to the tests, examples and tutorials. If you updated `visp` folder content, don't forget to refresh your build with `cd visp-build; cmake ../visp; make` -- In `visp-images` folder you will find the dataset used by the tests and examples. +\subsection docker_visp_pull_ubuntu_20_04 Pull Ubuntu 20.04 image -\subsection install_docker_visp_ubuntu_20_04 Ubuntu 20.04 image - -\note The `Dockerfile` used to generate this image is available [here](https://github.com/lagadic/visp/blob/master/ci/docker/ubuntu-20.04/Dockerfile). - -To use the docker image installed on an Ubuntu 20.04 system, use the following instructions: \verbatim $ docker pull vispci/vispci:ubuntu-20.04 -$ docker run -it vispci/vispci:ubuntu-20.04 \endverbatim -Within the container, ViSP workspace is installed in `$HOME/visp-ws` folder: +\subsection docker_visp_pull_ubuntu_22_04 Pull Ubuntu 22.04 image + \verbatim -[root@7152a1ec64ba]# ls $HOME/visp-ws -visp visp-build visp-images +$ docker pull vispci/vispci:ubuntu-22.04 \endverbatim -- In `visp` folder you will find a [Github](https://github.com/lagadic/visp) clone of the source code. To update its content with the last changes, run `cd visp; git pull` -- In `visp-build` folder you will find ViSP libraries and all the build binaries corresponding to the tests, examples and tutorials. If you updated `visp` folder content, don't forget to refresh your build with `cd visp-build; cmake ../visp; make` -- In `visp-images` folder you will find the dataset used by the tests and examples. +\section docker_visp_build Build docker image from Dockerfile -\section install_docker_run Run Docker ViSP images +We suppose here that you cloned ViSP from github in your workspace. -\subsection install_docker_run_ubuntu On Ubuntu host +Change directory to access the `Dockerfile` and build the corresponding docker image -\note The following instructions refer to \ref install_docker_visp_ubuntu_20_04, but could be easily adapted to any other image. +\subsection docker_visp_build_ubuntu_18_04 Build Ubuntu 18.04 image -With Docker installed on a MacOS host, when you run ViSP binaries that open GUI window to display for example an image you will get the following error: \verbatim -$ docker pull vispci/vispci:ubuntu-20.04 -$ docker run -it vispci/vispci:ubuntu-20.04 -[root@7152a1ec64ba]# cd $HOME/visp-ws/visp-build/tutorial/image -[root@7152a1ec64ba]# ./tutorial-viewer monkey.png -Catch an exception: Error [2]: Can't connect display on server . +$ cd $VISP_WS/visp/ci/docker/ubuntu-18.04 +$ docker build -t vispci/vispci:ubuntu-18.04 . 
\endverbatim -In order to run GUIs from the container, you will need to: -- Determine the IP Address of your Ubuntu host -\verbatim -... -wlp2s0: flags=4163 mtu 1500 - inet 192.168.1.5 netmask 255.255.255.0 broadcast 192.168.1.255 -... -\endverbatim -- Allow access to the X11 server -\verbatim -$ xhost + -access control disabled, clients can connect from any host -\endverbatim -- Start or connect to your Docker container. -\verbatim -$ docker run --device=/dev/video0:/dev/video0 -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY -it vispci/vispci:ubuntu-20.04 -\endverbatim -- You are now ready to run binaries that open GUI -\verbatim -[root@7152a1ec64ba]# cd $HOME/visp-ws/visp-build/tutorial/image -[root@7152a1ec64ba]# ./tutorial-viewer monkey.png -A click to quit... -\endverbatim -\image html img-monkey.png· -- You are now also ready to run binaries that require access to your camera +\subsection docker_visp_build_ubuntu_20_04 Build Ubuntu 20.04 image + \verbatim -[root@7152a1ec64ba]# cd $HOME/visp-ws/visp-build/tutorial/grabber -[root@7152a1ec64ba]# ./tutorial-grabber-v4l2 -Use device : 0 -Recording : disabled -Warning: cannot set input channel to 2 -Image size : 640 480 +$ cd $VISP_WS/visp/ci/docker/ubuntu-20.04 +$ docker build -t vispci/vispci:ubuntu-20.04 . \endverbatim -or with this other tutorial + +\subsection docker_visp_build_ubuntu_22_04 Build Ubuntu 22.04 image + \verbatim -[root@7152a1ec64ba]# ./tutorial-grabber-opencv -Use device : 0 -Recording : disabled +$ cd $VISP_WS/visp/ci/docker/ubuntu-22.04 +$ docker build -t vispci/vispci:ubuntu-22.04 . \endverbatim -\subsection install_docker_run_mac On MacOS host +\section docker_visp_start Start ViSP container -\note The following instructions refer to \ref install_docker_visp_ubuntu_20_04, but could be easily adapted to any other image. +\subsection docker_visp_start_ubuntu On Ubuntu host -With Docker installed on a MacOS host, when you run ViSP binaries that open GUI window to display for example an image you will get the following error: -\verbatim -$ docker pull vispci/vispci:ubuntu-20.04 -$ docker run -it vispci/vispci:ubuntu-20.04 -[root@7152a1ec64ba]# cd $HOME/visp-ws/visp-build/tutorial/image -[root@7152a1ec64ba]# ./tutorial-viewer monkey.png -Catch an exception: Error [2]: Can't connect display on server . -\endverbatim +- On your computer running Ubuntu, allow access to the X11 server + + $ xhost +local:docker + non-network local connections being added to access control list + +- Run your Docker container. The following command connects to the ubuntu-22.04 Docker container. + It can be easily adapted to any other container name. + + $ docker run --rm -it --network=host --privileged \ + --env=DISPLAY \ + --env=QT_X11_NO_MITSHM=1 \ + --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \ + --volume=/dev:/dev \ + vispci/vispci:ubuntu-22.04 + vispci@6c8d67579659:~$ pwd + /home/vispci + + +\subsection docker_visp_start_macos On MacOS host + +- Get your MacOS computer IP address + + $ IP=$(/usr/sbin/ipconfig getifaddr en0) + $ echo $IP + $ 192.168.1.18 + +- Allow connections from MacOS to XQuartz + + $ xhost + "$IP" + 192.168.1.18 being added to access control list + +- Run your Docker container. The following command connects to the ubuntu-22.04 Docker container. + It can be easily adapted to any other container name. 
+ + $ docker run --rm -it --network=host --privileged \ + --env=DISPLAY="${IP}:0" \ + --env=QT_X11_NO_MITSHM=1 \ + --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \ + --volume=/dev:/dev \ + vispci/vispci:ubuntu-22.04 + vispci@6c8d67579659:~$ pwd + /home/vispci + +\section docker_visp_usage How to use ViSP container + +- We suppose here that you successfully \ref docker_visp_start using one of the previous command: + + $ docker run ... -it vispci/vispci:ubuntu- + +- Within the container, ViSP workspace is installed in `$HOME/visp-ws` folder: + + vispci@6c8d67579659:~$ ls $HOME/visp-ws + visp visp-build visp-images + + - In `visp` folder you will find a [Github](https://github.com/lagadic/visp) clone of the source code. To update its + content with the last changes, run: + + cd $HOME/visp-ws/visp; git pull + + - In `visp-build` folder you will find ViSP libraries and all the build binaries corresponding to the tests, examples + and tutorials. If you updated `visp` folder content, don't forget to refresh your build with: + + cd $HOME/visp-ws/visp-build; cmake ../visp; make -j$(nproc) + + - In `visp-images` folder you will find the dataset used by the tests and examples. -In order to run GUIs from the container, you will need to: -- Install [XQuartz](https://www.xquartz.org/) 2.7.11 or later -- After installing XQuartz, start it and open XQuartz -> Preferences from the menu bar. Go to the last tab, Security, and enable both "Allow connections from network clients" and "Authenticate connections" checkboxes and restart XQuartz. -\image html img-xquartz-security.jpg -- Now your Mac will be listening on port 6000 for X11 connections. Record the IP Address of your Mac as you will need it in your containers. -\verbatim -$ ifconfig en0 | grep inet | awk '$1=="inet" {print $2}' -192.168.1.18 -\endverbatim -- Add the IP Address of your Mac to the X11 allowed list. -\verbatim -$ xhost + 192.168.1.18 -192.168.1.18 being added to access control list -\endverbatim -- Start or connect to your Docker container. -\verbatim -$ docker run -it vispci/vispci:ubuntu-20.04 -\endverbatim -- Export the `DISPLAY` variable within your container. -\verbatim -[root@7152a1ec64ba]# export DISPLAY=192.168.1.18:0 -\endverbatim - You are now ready to run binaries that open GUI -\verbatim -[root@7152a1ec64ba]# cd $HOME/visp-ws/visp-build/tutorial/image -[root@7152a1ec64ba]# ./tutorial-viewer monkey.png -A click to quit... -\endverbatim -\image html img-monkey.png· + + vispci@6c8d67579659:~$ cd $HOME/visp-ws/visp-build/tutorial/image + vispci@6c8d67579659:~$ ./tutorial-viewer monkey.png + A click to quit... + + \image html img-monkey.png + +- If you are on an Ubuntu host, you are now ready to run binaries that require access to your camera. + + vispci@6c8d67579659:~$ cd $HOME/visp-ws/visp-build/tutorial/grabber + vispci@6c8d67579659:~$ ./tutorial-grabber-v4l2 + Use device : 0 + Recording : disabled + Warning: cannot set input channel to 2 + Image size : 640 480 + + or with this other tutorial + + vispci@6c8d67579659:~$ ./tutorial-grabber-opencv + Use device : 0 + Recording : disabled \section install_docker_next Next tutorial -You are now ready to see the next \ref tutorial-getting-started that will show you how to use ViSP as a 3rd party to build your own project. +You are now ready to see the next \ref tutorial-getting-started that will show you how to use ViSP as a 3rd party to +build your own project. 
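Before moving on, you can make a quick sanity check that the ViSP installation inside the container is usable from your own code. The snippet below is only a minimal sketch with a hypothetical file name, not an official tutorial; it can be compiled against the ViSP build located in `$HOME/visp-ws/visp-build` (for instance through a small CMake project using `find_package(VISP)`, as detailed in the getting-started tutorial):

\code
// hello-visp.cpp - hypothetical sanity-check program, not part of the ViSP tutorials
#include <iostream>
#include <visp3/core/vpImage.h>
#include <visp3/io/vpImageIo.h>

int main()
{
  // Create a small grey test image and write it to disk in PGM format,
  // a format ViSP can always write without any optional 3rd party.
  vpImage<unsigned char> I(240, 320, 128);
  vpImageIo::write(I, "/tmp/hello-visp.pgm");
  std::cout << "Wrote a " << I.getWidth() << "x" << I.getHeight()
            << " test image to /tmp/hello-visp.pgm" << std::endl;
  return 0;
}
\endcode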
-*/ \ No newline at end of file +*/ diff --git a/doc/tutorial/image/tutorial-grabber.dox b/doc/tutorial/image/tutorial-grabber.dox index b03b8378e5..d74ef6b3aa 100644 --- a/doc/tutorial/image/tutorial-grabber.dox +++ b/doc/tutorial/image/tutorial-grabber.dox @@ -8,41 +8,45 @@ In this tutorial you will learn how to grab images with ViSP, either from cameras or from a video stream. -Grabbing images from a real camera is only possible if you have installed the corresponding 3rd party. The complete list of 3rd parties supported by ViSP and dedicated to framegrabbing is given here. From this page you will also found useful information to install these 3rd parties. +Grabbing images from a real camera is only possible if you have installed the corresponding 3rd party. The complete +list of 3rd parties supported by ViSP and dedicated to framegrabbing is given +here. From this page you will also found useful information to +install these 3rd parties. -All the material (source code and videos) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\verbatim -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/grabber -\endverbatim +Note that all the material (source code and videos) described in this tutorial is part of ViSP source code +(in `tutorial/grabber` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/grabber. \section grabber-camera-flycap Frame grabbing using FlyCapture SDK -After ViSP 3.0.0, we introduce vpFlyCaptureGrabber class, a wrapper over PointGrey FlyCapture SDK that allows to grab images from any PointGrey camera. This grabber was tested under Ubuntu and Windows with the following cameras: +After ViSP 3.0.0, we introduce vpFlyCaptureGrabber class, a wrapper over PointGrey FlyCapture SDK that allows to grab +images from any PointGrey camera. This grabber was tested under Ubuntu and Windows with the following cameras: - Flea3 USB 3.0 cameras (FL3-U3-32S2M-CS, FL3-U3-13E4C-C) - Flea2 firewire camera (FL2-03S2C) - Dragonfly2 firewire camera (DR2-COL) - + It should also work with GigE PGR cameras. -The following example also available in tutorial-grabber-flycapture.cpp shows how to use vpFlyCaptureGrabber to capture grey level images from a PointGrey camera under Ubuntu or Windows. The following example suppose that a window renderer (libX11 on Ubuntu or GDI on Windows) and FlyCapture SDK 3rd party are available throw VISP. +The following example also available in tutorial-grabber-flycapture.cpp shows how to use vpFlyCaptureGrabber to capture +grey level images from a PointGrey camera under Ubuntu or Windows. The following example suppose that a window renderer +(libX11 on Ubuntu or GDI on Windows) and FlyCapture SDK 3rd party are available throw VISP. \subsection grabber-camera-flycap-src Source code explained \include tutorial-grabber-flycapture.cpp Here after we explain the source code. -First an instance of the frame grabber is created. +First an instance of the frame grabber is created. \snippet tutorial-grabber-flycapture.cpp vpFlyCaptureGrabber construction -Once the grabber is created, we turn auto shutter and auto gain on and set the camera image size, color coding, and framerate. -These settings are enclosed in a try/catch to be able to continue if one of these settings are not supported by the camera. +Once the grabber is created, we turn auto shutter and auto gain on and set the camera image size, color coding, and +framerate. 
These settings are enclosed in a try/catch to be able to continue if one of these settings are not supported +by the camera. \snippet tutorial-grabber-flycapture.cpp vpFlyCaptureGrabber settings Then the grabber is initialized using: \snippet tutorial-grabber-flycapture.cpp vpFlyCaptureGrabber open -From now the grey level image \c I is also initialized with the size corresponding to the grabber settings. +From now the grey level image \c I is also initialized with the size corresponding to the grabber settings. Then we enter in a while loop where image acquisition is simply done by: \snippet tutorial-grabber-flycapture.cpp vpFlyCaptureGrabber acquire @@ -50,7 +54,8 @@ Then we enter in a while loop where image acquisition is simply done by: This image is then displayed using libX11 or GDI renderer: \snippet tutorial-grabber-flycapture.cpp vpFlyCaptureGrabber display -Depending on the command line options we are recording a sequence of images, or single shot images. We are also waiting for a non blocking mouse event to quit the while loop. +Depending on the command line options we are recording a sequence of images, or single shot images. We are also waiting +for a non blocking mouse event to quit the while loop. \snippet tutorial-grabber-flycapture.cpp vpFlyCaptureGrabber click to exit \subsection grabber-camera-flycap-use How to acquire images @@ -74,7 +79,9 @@ To grab a sequence of images, you may rather use: \section grabber-camera-dc1394 Frame grabbing using libdc1394 SDK -The next example also available in tutorial-grabber-1394.cpp shows how to use a framegrabber to acquire color images from a firewire or USB3 camera under Unix. The following example suppose that libX11 and libdc1394-2 3rd party are available. +The next example also available in tutorial-grabber-1394.cpp shows how to use a framegrabber to acquire color images +from a firewire or USB3 camera under Unix. The following example suppose that libX11 and libdc1394-2 3rd party are +available. \subsection grabber-camera-dc1394-src Source code explained @@ -83,13 +90,15 @@ The source code is the following: Here after we explain the new lines that are introduced. -First an instance of the frame grabber is created. During the creating a bus reset is send. If you don't want to reset the firewire bus, just turn reset to false. +First an instance of the frame grabber is created. During the creating a bus reset is send. If you don't want to reset +the firewire bus, just turn reset to false. \snippet tutorial-grabber-1394.cpp vp1394TwoGrabber construction -Once the grabber is created, we set the camera image size, color coding, and framerate. +Once the grabber is created, we set the camera image size, color coding, and framerate. \snippet tutorial-grabber-1394.cpp vp1394TwoGrabber settings -Note that here you can specify some other settings such as the firewire transmission speed. For a more complete list of settings see vp1394TwoGrabber class. +Note that here you can specify some other settings such as the firewire transmission speed. For a more complete list of +settings see vp1394TwoGrabber class. \code g.setIsoTransmissionSpeed(vp1394TwoGrabber::vpISO_SPEED_800); \endcode @@ -97,15 +106,17 @@ Note that here you can specify some other settings such as the firewire transmis Then the grabber is initialized using: \snippet tutorial-grabber-1394.cpp vp1394TwoGrabber open -From now the color image \c I is also initialized with the size corresponding to the grabber settings. 
+From now the color image \c I is also initialized with the size corresponding to the grabber settings. Then we enter in a while loop where image acquisition is simply done by: \snippet tutorial-grabber-1394.cpp vp1394TwoGrabber acquire -As in the previous example, depending on the command line options we are recording a sequence of images, or single shot images. We are also waiting for a non blocking mouse event to quit the while loop. +As in the previous example, depending on the command line options we are recording a sequence of images, or single shot +images. We are also waiting for a non blocking mouse event to quit the while loop. \snippet tutorial-grabber-1394.cpp vp1394TwoGrabber click to exit -In the previous example we use vp1394TwoGrabber class that works for firewire cameras under Unix. If you are under Windows, you may use vp1394CMUGrabber class. A similar example is provided in tutorial-grabber-CMU1394.cpp. +In the previous example we use vp1394TwoGrabber class that works for firewire cameras under Unix. If you are under +Windows, you may use vp1394CMUGrabber class. A similar example is provided in tutorial-grabber-CMU1394.cpp. \subsection grabber-camera-dc1394-use How to acquire images @@ -128,7 +139,8 @@ To grab a sequence of images, you may rather use: \section grabber-camera-v4l2 Frame grabbing using libv4l2 SDK -If you want to grab images from an usb camera under Unix, you may use vpV4l2Grabber class that is a wrapper over Video For Linux SDK. To this end libv4l should be installed. An example is provided in tutorial-grabber-v4l2.cpp. +If you want to grab images from an usb camera under Unix, you may use vpV4l2Grabber class that is a wrapper over Video +For Linux SDK. To this end libv4l should be installed. An example is provided in tutorial-grabber-v4l2.cpp. \subsection grabber-camera-v4l2-use How to acquire images @@ -151,7 +163,8 @@ To grab a sequence of images, you may rather use: \section grabber-camera-pylon Frame grabbing using Pylon SDK -It is also possible to grab images using Pylon, the SDK for Basler cameras. You may find an example in tutorial-grabber-basler-pylon.cpp. +It is also possible to grab images using Pylon, the SDK for Basler cameras. You may find an example in +tutorial-grabber-basler-pylon.cpp. \subsection grabber-camera-pylon-use How to acquire images @@ -174,7 +187,8 @@ To grab a sequence of images, you may rather use: \section grabber-camera-realsense Frame grabbing using Realsense SDK -It is also possible to grab images using librealsense, the SDK provided for Intel Realsense RDB-D cameras. You may find an example in tutorial-grabber-realsense.cpp. +It is also possible to grab images using librealsense, the SDK provided for Intel Realsense RDB-D cameras. You may +find an example in tutorial-grabber-realsense.cpp. \subsection grabber-camera-realsense-use How to acquire images @@ -205,17 +219,19 @@ To acquire images from 2 T265 devices with serial numbers 11622110511 and 116221 \code ./tutorial-grabber-multiple-realsense --T265 11622110511 --T265 11622110433 \endcode -To acquire images from 1 T265 device (Serial Number:11622110511) and 1 D435 device (Serial Number: 752112070408), you may use: +To acquire images from 1 T265 device (Serial Number:11622110511) and 1 D435 device (Serial Number: 752112070408), +you may use: \code ./tutorial-grabber-multiple-realsense --T265 11622110511 --D435 752112070408 \endcode -\note There is getRealSense2Info.cpp in `example/device/framegrabber` folder that could be used to get the device serial number. 
+\note There is getRealSense2Info.cpp in `example/device/framegrabber` folder that could be used to get the device +serial number. \verbatim -$ ./getRealSense2Info +$ ./getRealSense2Info RealSense characteristics: -Intel RealSense T265 11622110409 0.2.0.951 - Device info: +Intel RealSense T265 11622110409 0.2.0.951 + Device info: Name : Intel RealSense T265 Serial Number : 11622110409 Firmware Version : 0.2.0.951 @@ -227,7 +243,9 @@ Intel RealSense T265 11622110409 0.2.0.951 \section grabber-camera-structure Frame grabbing using Occipital Structure SDK -If you have a Structure Core RGB-D camera, it is also possible to grab images using `libStructure` the cross-platform library that comes with Occipital Structure SDK. You may find an example in tutorial-grabber-structure-core.cpp. It allows to save visible and depth images. +If you have a Structure Core RGB-D camera, it is also possible to grab images using `libStructure` the cross-platform +library that comes with Occipital Structure SDK. You may find an example in tutorial-grabber-structure-core.cpp. It +allows to save visible and depth images. \subsection grabber-camera-structure-use How to acquire images @@ -250,7 +268,8 @@ To grab a sequence of images, you may rather use: \section grabber-rgbd-D435-structurecore RGBD frame grabbing from RealSense D435 and Structure Core -If you have both Intel RealSense D435 and Occipital Structure Core, you can acquire color and depth frames simultaneously from both sensors. +If you have both Intel RealSense D435 and Occipital Structure Core, you can acquire color and depth frames +simultaneously from both sensors. Once tutorial-grabber-rgbd-D435-structurecore.cpp is built, you just need to run: \code @@ -290,7 +309,8 @@ To grab a sequence of images, you may rather use: \section grabber-camera-cmu1394 Frame grabbing using CMU1394 SDK -It is also possible to grab images using CMU1394 SDK if you want to use a firewire camera under Windows. You may find an example in tutorial-grabber-CMU1394.cpp. +It is also possible to grab images using CMU1394 SDK if you want to use a firewire camera under Windows. You may find +an example in tutorial-grabber-CMU1394.cpp. \section grabber-bebop2 Frame grabbing using Parrot Bebop 2 drone @@ -316,14 +336,15 @@ To grab a sequence of images, you may rather use: \code ./tutorial-grabber-bebop2 --seqname I%04d.pgm --record 0 \endcode -You can chose to record HD 720p pictures from the drone (instead of default 480p) using --hd_resolution option : +You can chose to record HD 720p pictures from the drone (instead of default 480p) using --hd_resolution option : \code ./tutorial-grabber-bebop2 --seqname I%04d.pgm --record 0 --hd_resolution \endcode \section grabber-video-stream Images from a video stream -With ViSP it also possible to get images from an input video stream. Supported formats are *.avi, *.mp4, *.mov, *.ogv, *.flv and many others... To this end we exploit OpenCV 3rd party. +With ViSP it also possible to get images from an input video stream. Supported formats are *.avi, *.mp4, *.mov, *.ogv, +*.flv and many others... To this end we exploit OpenCV 3rd party. The example below available in tutorial-video-reader.cpp shows how to consider an mpeg video stream. @@ -337,13 +358,15 @@ The source code is the following: We explain now the new lines that were introduced. \snippet tutorial-video-reader.cpp Include -Include the header of the vpTime class that allows to measure time, and of the vpVideoReader class that allows to read a video stream. 
+Include the header of the vpTime class that allows to measure time, and of the vpVideoReader class that allows to read +a video stream. \snippet tutorial-video-reader.cpp vpVideoReader construction Create an instance of a video reader. \snippet tutorial-video-reader.cpp vpVideoReader setting -Set the name of the video stream. Here \c videoname corresponds to a video file name location. For example we provide the file \c video.mpg located in the same folder than the executable. +Set the name of the video stream. Here \c videoname corresponds to a video file name location. For example we provide +the file \c video.mpg located in the same folder than the executable. The vpVideoReader class can also handle a sequence of images. For example, to read the following images: @@ -362,7 +385,8 @@ you may use the following: \code g.setFileName("./image%04d.png"); \endcode -where you specify that each image number is coded with 4 digits. Here, we will use \c libpng or \c OpenCV to read PNG images. Supported image formats are PPM, PGM, PNG and JPEG. +where you specify that each image number is coded with 4 digits. Here, we will use \c libpng or \c OpenCV to read PNG +images. Supported image formats are PPM, PGM, PNG and JPEG. Then as for any other grabber, you have to initialize the frame grabber using: @@ -378,7 +402,8 @@ To synchronize the video decoding with the video framerate, we measure the begin \snippet tutorial-video-reader.cpp vpVideoReader loop start time -The synchronization is done by waiting from the beginning of the iteration the corresponding time expressed in milliseconds by using: +The synchronization is done by waiting from the beginning of the iteration the corresponding time expressed in +milliseconds by using: \snippet tutorial-video-reader.cpp vpVideoReader loop rate \subsection grabber-video-stream-use How to acquire images @@ -399,7 +424,7 @@ To read an other video, let say my-video.mpg, you may use: \section grabber-next Next tutorial You are now ready to see how to continue with: -- \ref tutorial-multi-threading if you want to see how to extend these examples with one thread for capture and an other one for display -- or with a simple image processing that shows how to track blobs explained in \ref tutorial-tracking-blob. -- There is also the \ref tutorial-video-manipulation that could be useful when you need to visualize, rename or change the format of a captured video or sequence of successive images, typically for deep learning purpose. +- a simple image processing that shows how to track blobs explained in \ref tutorial-tracking-blob. +- There is also the \ref tutorial-video-manipulation that could be useful when you need to visualize, rename or change + the format of a captured video or sequence of successive images, typically for deep learning purpose. */ diff --git a/doc/tutorial/image/tutorial-image-display-overlay.dox b/doc/tutorial/image/tutorial-image-display-overlay.dox index 73e4231727..d17fa3e005 100644 --- a/doc/tutorial/image/tutorial-image-display-overlay.dox +++ b/doc/tutorial/image/tutorial-image-display-overlay.dox @@ -4,17 +4,17 @@ \section display_overlay_intro Introduction -In this tutorial you will learn how to display basic drawings with ViSP either on Unix-like systems (including OSX, Fedora, Ubuntu, Debian, ...) or on Windows. +In this tutorial you will learn how to display basic drawings with ViSP either on Unix-like systems (including OSX, +Fedora, Ubuntu, Debian, ...) or on Windows. 
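To give a rough idea of what such a program looks like before going through the detailed steps, here is a minimal sketch (illustrative only, not one of the tutorial sources; it assumes that at least one display backend such as X11 or GDI is available):

\code
// overlay-sketch.cpp - hypothetical minimal example of drawing in a window overlay
#include <visp3/core/vpImage.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayX.h>

int main()
{
  vpImage<unsigned char> I(480, 640, 128); // grey canvas

#if defined(VISP_HAVE_X11)
  vpDisplayX d(I, vpDisplay::SCALE_AUTO);
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d(I, vpDisplay::SCALE_AUTO);
#endif

  vpDisplay::display(I);
  // Draw a filled red circle and a text string in the window overlay
  vpDisplay::displayCircle(I, vpImagePoint(I.getHeight() / 2, I.getWidth() / 2), 100, vpColor::red, true);
  vpDisplay::displayText(I, vpImagePoint(20, 20), "A click to quit...", vpColor::red);
  vpDisplay::flush(I);
  vpDisplay::getClick(I); // wait for a blocking mouse click
  return 0;
}
\endcode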
-Note that all the material (source code and images) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/image -\endcode +Note that all the material (source code and images) described in this tutorial is part of ViSP source code +(in `tutorial/image` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/image. \section display_overlay_default Load and display an image -ViSP gui module provides Graphical User Interfaces capabilities. To this end you may use several optional third-party libraries which are: X11, GDI, OpenCV, GTK, Direct3D. In the next example, we will use the first 3rd party that is available from the previous list. +ViSP gui module provides Graphical User Interfaces capabilities. To this end +you may use several optional third-party libraries which are: X11, GDI, +OpenCV, GTK, Direct3D. In the next example, we will use the first 3rd party that is available from the previous list. The following example also available in tutorial-viewer.cpp shows how to read and display an image. @@ -32,7 +32,10 @@ It will open a window containing `monkey.png` image: Here is the detailed explanation of the source, line by line : \snippet tutorial-viewer.cpp Include display -Include all the headers for image viewers. The two first one are for Windows systems. They require that Direct 3D or the \e Graphical \e Device \e Interface (\e GDI) coming with the installation of Visual Studio are available. The third one needs GTK that is cross-platform. The fourth is for unix-like systems and requires that \e libX11 is available. The last one is also cross-platform and requires that OpenCV is available. +Include all the headers for image viewers. The two first one are for Windows systems. They require that Direct 3D or +the \e Graphical \e Device \e Interface (\e GDI) coming with the installation of Visual Studio are available. The +third one needs GTK that is cross-platform. The fourth is for unix-like systems and requires that \e libX11 is +available. The last one is also cross-platform and requires that OpenCV is available. \snippet tutorial-viewer.cpp Include io Include the header that allows to read/write PGM, PPM, PNG and JPEG images from the disk using vpImageIo class. @@ -41,10 +44,12 @@ Include the header that allows to read/write PGM, PPM, PNG and JPEG images from Create an instance of a color image where each pixel is coded in RGBa. \snippet tutorial-viewer.cpp vpImage reading -The image \c I is initialized by reading an image file from the disk. If the image format is not supported we throw an exception. +The image \c I is initialized by reading an image file from the disk. If the image format is not supported we throw an +exception. \snippet tutorial-viewer.cpp vpDisplay construction -Create an instance of an image display window for image \c I. The first viewer that is available is used. Here we create the link between the image \c I and the display \c d. Note that an image can only have one display. +Create an instance of an image display window for image \c I. The first viewer that is available is used. Here we +create the link between the image \c I and the display \c d. Note that an image can only have one display. \snippet tutorial-viewer.cpp vpDisplay set title The title of the display is then set to \c "My image". @@ -57,7 +62,8 @@ Here we handle mouse events. 
We are waiting for a blocking mouse click to end th \section display_overlay_draw Display basic drawings in window overlay -There are a lot of examples in ViSP that show how to display drawings in window overlay. There is testDisplays.cpp that gives an overview. +There are a lot of examples in ViSP that show how to display drawings in window overlay. There is testDisplays.cpp +that gives an overview. If you run the corresponding binary: \code @@ -69,7 +75,9 @@ it will open a window like the following: \subsection display_overlay_point Display a point in overlay -As shown in tutorial-draw-point.cpp which source code is given below we use vpDisplay::displayPoint() function to draw a point in the overlay of a windows that displays a 3840 by 2160 grey image that has all the pixels set to 128 gray level. +As shown in tutorial-draw-point.cpp which source code is given below we use vpDisplay::displayPoint() function to draw +a point in the overlay of a windows that displays a 3840 by 2160 grey image that has all the pixels set to 128 gray +level. \include tutorial-draw-point.cpp @@ -85,7 +93,8 @@ Here we draw a red coloured line segment with the specified initial and final co \subsection display_overlay_circle Display a circle in overlay -As given in tutorial-image-display-scaled-auto.cpp we use vpDisplay::displayCircle() function to draw a circle on the screen. +As given in tutorial-image-display-scaled-auto.cpp we use vpDisplay::displayCircle() function to draw a circle on the +screen. \snippet tutorial-image-display-scaled-auto.cpp Circle @@ -116,13 +125,15 @@ Here `Hello world` is displayed in the middle of the image. \section display_overlay_export Export and save the content of a window as an image -As given in tutorial-export-image.cpp which source code is given below, we use vpDisplay::getImage() function to export the image with the whole drawings in overlay. Then we use vpImageIo::write() to save the image in png format. +As given in tutorial-export-image.cpp which source code is given below, we use vpDisplay::getImage() function to export +the image with the whole drawings in overlay. Then we use vpImageIo::write() to save the image in png format. \include tutorial-export-image.cpp \section display_overlay_event_keyboard Handle keyboard events in a window -As given in tutorial-event-keyboard.cpp which code is given below, we use vpDisplay::getKeyboardEvent() function to get the value of the key pressed. +As given in tutorial-event-keyboard.cpp which code is given below, we use vpDisplay::getKeyboardEvent() function to get +the value of the key pressed. \include tutorial-event-keyboard.cpp diff --git a/doc/tutorial/image/tutorial-image-display.dox b/doc/tutorial/image/tutorial-image-display.dox index ff4770efbd..966b86be08 100644 --- a/doc/tutorial/image/tutorial-image-display.dox +++ b/doc/tutorial/image/tutorial-image-display.dox @@ -5,36 +5,45 @@ \section image_display_intro Introduction -\note We assume in this tutorial that you have successfully build your first project using ViSP as 3rd party as explained in one of the \ref tutorial_started tutorials. +\note We assume in this tutorial that you have successfully build your first project using ViSP as 3rd party as +explained in one of the \ref tutorial_started tutorials. -In this tutorial you will learn how to display an image in a window with ViSP either on Unix-like systems (including OSX, Fedora, Ubuntu, Debian, ...) or on Windows. 
+In this tutorial you will learn how to display an image in a window with ViSP either on Unix-like systems (including +OSX, Fedora, Ubuntu, Debian, ...) or on Windows. -Note that all the material (source code and images) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/image -\endcode +Note that all the material (source code and images) described in this tutorial is part of ViSP source code +(in `tutorial/image` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/image. \section image_display_default Create and display an image in a window -ViSP gui module provides Graphical User Interfaces capabilities that allows to display a vpImage in a window. To this end you may use several optional third-party libraries which are: X11, GDI, OpenCV, GTK, Direct3D. We recommend to use X11 on unix-like systems thanks to vpDisplayX class and GDI on Windows thanks to vpDisplayGDI. If none of these classes are available, you may use vpDisplayOpenCV instead. +ViSP gui module provides Graphical User Interfaces capabilities that allows to +display a vpImage in a window. To this end you may use several optional third-party libraries which are: +X11, GDI, OpenCV, GTK, Direct3D. We recommend to use X11 on unix-like +systems thanks to vpDisplayX class and GDI on Windows thanks to vpDisplayGDI. If none of these classes are available, +you may use vpDisplayOpenCV instead. -The following example also available in tutorial-image-display.cpp shows how to create a gray level 3840x2160 image with all the pixels set to 128, and display a red circle with 200 pixel radius in the middle of the image. +The following example also available in tutorial-image-display.cpp shows how to create a gray level 3840x2160 image +with all the pixels set to 128, and display a red circle with 200 pixel radius in the middle of the image. \include tutorial-image-display.cpp -Depending on your screen resolution you may just see a part of the image, and certainly not the full red circle. Next image shows an example of this behavior when screen resolution is less than image size: +Depending on your screen resolution you may just see a part of the image, and certainly not the full red circle. Next +image shows an example of this behavior when screen resolution is less than image size: \image html img-tutorial-display.png -\note A vpImage can only be associated to one display window. In the previous example, image `I` is associated to display `d`. Depending on your platform, object `d` is either a vpDisplayX or a vpDisplayGDI. +\note A vpImage can only be associated to one display window. In the previous example, image `I` is associated to +display `d`. Depending on your platform, object `d` is either a vpDisplayX or a vpDisplayGDI. \section image_display_scaled Display an image that is larger than the screen resolution \subsection image_display_scaled_manu Setting a manual down scaling factor -This other example available in tutorial-image-display-scaled-manu.cpp shows how to modify the previous example in order to introduce a down scaling factor to reduce the size of the display by 5 along the lines and the columns. This feature may be useful to display images that are larger than the screen resolution. 
+This other example available in tutorial-image-display-scaled-manu.cpp shows how to modify the previous example in +order to introduce a down scaling factor to reduce the size of the display by 5 along the lines and the columns. This +feature may be useful to display images that are larger than the screen resolution. -To down scale the display size, just modify the previous example adding the vpDisplay::vpScaleType parameter to the constructor. +To down scale the display size, just modify the previous example adding the vpDisplay::vpScaleType parameter to the +constructor. \snippet tutorial-image-display-scaled-manu.cpp vpDisplay scale manu @@ -52,9 +61,12 @@ It is also possible to do the same using the default constructor: \subsection image_display_scaled_auto Setting an auto down scaling factor -This other example available in tutorial-image-display-scaled-auto.cpp shows now how to modify the previous example in order to introduce an auto down scaling factor that is automatically computed from the screen resolution in order that two images could be displayed given the screen resolution. +This other example available in tutorial-image-display-scaled-auto.cpp shows now how to modify the previous example in +order to introduce an auto down scaling factor that is automatically computed from the screen resolution in order that +two images could be displayed given the screen resolution. -To consider an auto down scaling factor, modify the previous example adding the vpDisplay::SCALE_AUTO parameter to the constructor. +To consider an auto down scaling factor, modify the previous example adding the vpDisplay::SCALE_AUTO parameter to the +constructor. \snippet tutorial-image-display-scaled-auto.cpp vpDisplay scale auto diff --git a/doc/tutorial/image/tutorial-image-filtering.dox b/doc/tutorial/image/tutorial-image-filtering.dox index 6db477e912..dbf484716f 100644 --- a/doc/tutorial/image/tutorial-image-filtering.dox +++ b/doc/tutorial/image/tutorial-image-filtering.dox @@ -9,17 +9,15 @@ This tutorial supposes that you have followed the \ref tutorial-getting-started. In this tutorial you will learn how to use ViSP filtering functions implemented in vpImageFilter class. -All the material (source code and images) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/image -\endcode +Note that all the material (source code and images) described in this tutorial is part of ViSP source code +(in `tutorial/image` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/image. Let us consider the following source code that comes from tutorial-image-filter.cpp. \include tutorial-image-filter.cpp -Once build, you should have \c tutorial-image-filter binary. It shows how to apply different filters on an input image. Here we will consider monkey.pgm as input image. +Once build, you should have \c tutorial-image-filter binary. It shows how to apply different filters on an input image. +Here we will consider monkey.pgm as input image. \image html img-monkey-gray.png @@ -37,7 +35,8 @@ Monkey input image is read from disk and is stored in \c I which is a gray leve \snippet tutorial-image-filter.cpp vpImage construction -To apply a Gaussian blur to this image we first have to declare a resulting floating-point image \c F. 
Then the blurred image could be obtained using the default Gaussian filter: +To apply a Gaussian blur to this image we first have to declare a resulting floating-point image \c F. Then the blurred +image could be obtained using the default Gaussian filter: \snippet tutorial-image-filter.cpp Gaussian blur @@ -79,8 +78,9 @@ After the declaration of a new image container \c C, Canny edge detector is appl Where: - 5: is the size of the Gaussian kernel used to blur the image before applying the Canny edge detector. -- -1.: is the upper threshold set in the program. Setting it to a negative value asks ViSP to compute automatically the lower and upper thresholds. Otherwise, -the lower threshold is set to be equal to one third of the upper threshold, following Canny’s recommendation. +- -1.: is the upper threshold set in the program. Setting it to a negative value asks ViSP to compute automatically the + lower and upper thresholds. Otherwise, the lower threshold is set to be equal to one third of the upper threshold, + following Canny’s recommendation. - 3: is the size of the Sobel kernel used internally. The resulting image \c C is the following: diff --git a/doc/tutorial/image/tutorial-simu-image.dox b/doc/tutorial/image/tutorial-simu-image.dox index a6df4be117..d9fb79d541 100644 --- a/doc/tutorial/image/tutorial-simu-image.dox +++ b/doc/tutorial/image/tutorial-simu-image.dox @@ -5,19 +5,19 @@ \section simu_image_intro Introduction -The aim of this tutorial is to explain how to use vpImageSimulator class to project an image of a planar scene at a given camera position. For example, this capability can then be used during the simulation of a visual-servo as described in \ref tutorial-ibvs to introduce an image processing. +The aim of this tutorial is to explain how to use vpImageSimulator class to project an image of a planar scene at a +given camera position. For example, this capability can then be used during the simulation of a visual-servo as +described in \ref tutorial-ibvs to introduce an image processing. -All the material (source code and images) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/simulator/image -\endcode +Note that all the material (source code and images) described in this tutorial is part of ViSP source code +(in `simulator/image` folder) and could be found in https://github.com/lagadic/visp/tree/master/simulator/image. \section simu_image_projection Image projection -Given the image of a planar 20cm by 20cm square target as the one presented in the next image, we show here after how to project this image at a given camera position, and how to get the resulting image. +Given the image of a planar 20cm by 20cm square target as the one presented in the next image, we show here after how +to project this image at a given camera position, and how to get the resulting image. -\image html img-target-square.png Image of a planar 20cm by 20cm square target. +\image html img-target-square.png Image of a planar 20cm by 20cm square target. This is done by the following code also available in tutorial-image-simulator.cpp: \include tutorial-image-simulator.cpp @@ -31,10 +31,12 @@ The provide hereafter the explanation of the new lines that were introduced. \snippet tutorial-image-simulator.cpp Include Include the header of the vpImageSimulator class that allows to project an image to a given camera position. 
-Then in the main() function we create an instance of a gray level image that corresponds to the image of the planar target, and then we read the image from the disk. +Then in the main() function we create an instance of a gray level image that corresponds to the image of the planar +target, and then we read the image from the disk. \snippet tutorial-image-simulator.cpp Read image -Since the previous image corresponds to a 20cm by 20cm target, we initialize the 3D coordinates of each corner in the plane Z=0. Each +Since the previous image corresponds to a 20cm by 20cm target, we initialize the 3D coordinates of each corner in the +plane Z=0. Each \snippet tutorial-image-simulator.cpp Set model Then we create an instance of the image \c I that will contain the rendered image from a given camera position. @@ -43,20 +45,25 @@ Then we create an instance of the image \c I that will contain the rendered imag Since the projection depends on the camera, we set its intrinsic parameters. \snippet tutorial-image-simulator.cpp Camera parameters -We also set the render position of the camera as an homogeneous transformation between the camera frame and the target frame. +We also set the render position of the camera as an homogeneous transformation between the camera frame and the target +frame. \snippet tutorial-image-simulator.cpp Set cMo -We create here an instance of the planar image projector, set the interpolation to bilinear and initialize the projector with the image of the target and the coordinates of its corners. +We create here an instance of the planar image projector, set the interpolation to bilinear and initialize the +projector with the image of the target and the coordinates of its corners. \snippet tutorial-image-simulator.cpp Create simulator -Now to retrieve the rendered image we first clean the content of the image to render, set the camera position, and finally get the image using the camera parameters. +Now to retrieve the rendered image we first clean the content of the image to render, set the camera position, and +finally get the image using the camera parameters. \snippet tutorial-image-simulator.cpp Render image -Then, if \c libjpeg is available, the rendered image is saved in the same directory then the executable. +Then, if \c libjpeg is available, the rendered image is saved in the same directory then the executable. \snippet tutorial-image-simulator.cpp Write image Finally, as in \ref tutorial-getting-started we open a window to display the rendered image. -Note that this planar image projection capability has been also introduced in vpVirtualGrabber class exploited in tutorial-ibvs-4pts-image-tracking.cpp. Thus the next \ref tutorial-ibvs shows how to use it in order to introduce an image processing that does the tracking of the target during a visual-servo simulation. +Note that this planar image projection capability has been also introduced in vpVirtualGrabber class exploited in +tutorial-ibvs-4pts-image-tracking.cpp. Thus the next \ref tutorial-ibvs shows how to use it in order to introduce an +image processing that does the tracking of the target during a visual-servo simulation. 
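For convenience, here is a condensed recap of the complete pipeline described above gathered in a single sketch. It is not a verbatim copy of tutorial-image-simulator.cpp; the file names, target size and camera pose are illustrative values:

\code
// image-simulator-sketch.cpp - hypothetical condensed example, see tutorial-image-simulator.cpp for the real source
#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpColVector.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpMath.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/robot/vpImageSimulator.h>

int main()
{
  // Image of the planar 20cm x 20cm target
  vpImage<unsigned char> target;
  vpImageIo::read(target, "./target_square.pgm"); // illustrative file name

  // 3D coordinates (in meters) of the target corners in the target frame, all lying in the plane Z=0
  vpColVector X[4];
  for (int i = 0; i < 4; i++)
    X[i].resize(3);
  X[0][0] = -0.1; X[0][1] = -0.1; X[0][2] = 0; // top left
  X[1][0] =  0.1; X[1][1] = -0.1; X[1][2] = 0; // top right
  X[2][0] =  0.1; X[2][1] =  0.1; X[2][2] = 0; // bottom right
  X[3][0] = -0.1; X[3][1] =  0.1; X[3][2] = 0; // bottom left

  // Image that will receive the rendered view, and the camera intrinsic parameters
  vpImage<unsigned char> I(480, 640, 0);
  vpCameraParameters cam(840, 840, I.getWidth() / 2., I.getHeight() / 2.);

  // Camera position with respect to the target frame (illustrative pose)
  vpHomogeneousMatrix cMo(0, 0, 0.35, 0, vpMath::rad(30), vpMath::rad(15));

  // Planar image projector: bilinear interpolation, initialized from the target image and its corners
  vpImageSimulator sim;
  sim.setInterpolationType(vpImageSimulator::BILINEAR_INTERPOLATION);
  sim.init(target, X);

  // Render: clean the output image, set the camera position, then retrieve the projected view
  sim.setCleanPreviousImage(true);
  sim.setCameraPosition(cMo);
  sim.getImage(I, cam);

  vpImageIo::write(I, "./rendered.pgm");
  return 0;
}
\endcode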
*/ diff --git a/doc/tutorial/image/tutorial-video-manipulation.dox b/doc/tutorial/image/tutorial-video-manipulation.dox index cadacfaaa7..13844fb751 100644 --- a/doc/tutorial/image/tutorial-video-manipulation.dox +++ b/doc/tutorial/image/tutorial-video-manipulation.dox @@ -4,13 +4,11 @@ \section img_manip_seq_intro Introduction -In this tutorial you will learn how to manipulate a video or a sequence of successives images in order to rename the images, convert images format, or select some images that will constitute a dataset typically for deep learning purpose. +In this tutorial you will learn how to manipulate a video or a sequence of successives images in order to rename the +images, convert images format, or select some images that will constitute a dataset typically for deep learning purpose. -Note that all the material (source code and images) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/image -\endcode +Note that all the material (source code and images) described in this tutorial is part of ViSP source code +(in `tutorial/image` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/image. \section img_manip_seq_prereq Prerequisites @@ -87,7 +85,8 @@ $ cd $VISP_WS/visp-build/tutorial/image $ ./tutorial-video-manipulation --in ${VISP_INPUT_IMAGE_PATH}/video/cube.mpeg \endverbatim -- To visualize a sequence of successive images, like the one acquired in \ref img_manip_seq_create section, you may rather run: +- To visualize a sequence of successive images, like the one acquired in \ref img_manip_seq_create section, you may + rather run: \verbatim $ cd $VISP_WS/visp-build/tutorial/image ./tutorial-video-manipulation --in /tmp/myseq/png/I%04d.png @@ -177,24 +176,26 @@ image-0120.jpeg Moreover, there is also an other extra option `--out-gray` that allows to save output images in Y8 gray level images. -- For example, considering that `/tmp/myseq/png/I%04d.png` input images are color images, if you want to convert them in Y8 gray, you may run: +- For example, considering that `/tmp/myseq/png/I%04d.png` input images are color images, if you want to convert them + in Y8 gray, you may run: \verbatim $ ./tutorial-video-manipulation --in /tmp/myseq/png/I%04d.png --out /tmp/myseq/gray-jpeg/gray-image-%04d.jpg --out-gray \endverbatim -- Finally, there is also the `--out-stride` option that allows to keep one image over n in the resulting output video. - For example, if your input image sequence has 40 images and you want to create a new image sequence temporally subsampled - with only 20 images, you can use this option like: +- Finally, there is also the `--out-stride` option that allows to keep one image over n in the resulting output video. + For example, if your input image sequence has 40 images and you want to create a new image sequence temporally + subsampled with only 20 images, you can use this option like: \verbatim $ ./tutorial-video-manipulation --in /tmp/myseq/png/I%04d.png --out /tmp/myseq/png-stride-2/I%04d.png --out-stride 2 \endverbatim - - + + \subsection img_manip_seq_extract Images extraction from a video The tutorial tutorial-image-manipulation.cpp allows also to extract images from a video file. 
-- For example to extract the images from an `mpeg` video part of ViSP data set and create a sequence of successive images, you may run:
+- For example to extract the images from an `mpeg` video part of ViSP data set and create a sequence of successive
+  images, you may run:
\verbatim
$ ./tutorial-video-manipulation --in ${VISP_INPUT_IMAGE_PATH}/video/cube.mpeg --out /tmp/cube/jpeg/image-%04d.jpeg
\endverbatim
@@ -211,7 +212,7 @@ image-0008.jpeg
image-0009.jpeg
image-0010.jpeg
...
-\endverbatim
+\endverbatim
- You can then replay the image sequence using:
\verbatim
./tutorial-video-manipulation --in /tmp/cube/jpeg/image-%04d.jpeg
@@ -219,15 +220,17 @@ image-0010.jpeg
\subsection img_manip_seq_select Images selection to create a new video or sequence
-The tutorial tutorial-image-manipulation.cpp allows also to extract some images selected by the user during visualisation by user click. This feature could be useful to extract the images that will be part of a deep leaning data set.
+The tutorial tutorial-image-manipulation.cpp also allows to extract some images selected by the user during
+visualisation by user click. This feature could be useful to extract the images that will be part of a deep
+learning data set.
- To create a new video from selected images you may add `--select` option, like:
\verbatim
$ ./tutorial-video-manipulation --in ${VISP_INPUT_IMAGE_PATH}/video/cube.mpeg --out /tmp/cube/jpeg/image-%04d.jpeg --select
\endverbatim
-- Here if the user click four times in the video, you will get a new sequence with 4 successive images
+- Here, if the user clicks four times in the video, you will get a new sequence with 4 successive images
\verbatim
-$ ls -1 /tmp/cube/jpeg
+$ ls -1 /tmp/cube/jpeg
image-0001.jpeg
image-0002.jpeg
image-0003.jpeg
diff --git a/doc/tutorial/imgproc/tutorial-imgproc-brightness.dox b/doc/tutorial/imgproc/tutorial-imgproc-brightness.dox
index 1797cf6fb1..eafd02e649 100644
--- a/doc/tutorial/imgproc/tutorial-imgproc-brightness.dox
+++ b/doc/tutorial/imgproc/tutorial-imgproc-brightness.dox
@@ -49,6 +49,13 @@ The result image is the following:
\image html img-tutorial-brighness-gamma-correction-3.5.png "Left: underexposed image - Right: image corrected with gamma=3.5"
+ViSP proposes the implementation of several methods for automatic computation of the gamma factor.
+Most of these methods are designed for gray-shade images, so ViSP proposes different ways
+of handling the colors.
+
+You can test the different methods using the `--gamma-method` option of the tutorial program
+and the different ways of handling the colors using the `--gamma-color-handling` option.
+
\section imgproc_brightness_histogram_equalization Histogram equalization
Histogram equalization is an image processing method that will adjust the contrast of an image by stretching or shrinking the intensity distribution in order to have a linear cumulative histogram distribution.
diff --git a/doc/tutorial/ios/tutorial-detection-apriltag-ios-realtime.dox b/doc/tutorial/ios/tutorial-detection-apriltag-ios-realtime.dox
index 5f2d854986..a675381106 100644
--- a/doc/tutorial/ios/tutorial-detection-apriltag-ios-realtime.dox
+++ b/doc/tutorial/ios/tutorial-detection-apriltag-ios-realtime.dox
@@ -9,11 +9,9 @@ This tutorial follows \ref tutorial-detection-apriltag-ios and shows how to dete
In this tutorial, you will be able to learn how to detect with Swift 4 and get camera intrinsic parameters.
-All the material (Xcode project) described in this tutorial is part of ViSP source code and could be downloaded using the following command:
-
-\verbatim
-$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/ios/AprilTagLiveCamera
-\endverbatim
+Note that all the material (Xcode project) described in this tutorial is part of ViSP source code
+(in `tutorial/ios/AprilTagLiveCamera` folder) and could be found in
+https://github.com/lagadic/visp/tree/master/tutorial/ios/AprilTagLiveCamera.
Once downloaded, you have just to drag & drop ViSP and OpenCV frameworks available following \ref tutorial-install-ios-package.
@@ -35,18 +33,21 @@ The camera’s intrinsic parameters can be acquired from each captured image by
Note: intrinsic parameters are only supported on some iOS devices with iOS11.
-The intrinsic parameters that represent camera features can generally be represented by a matrix of pixel-based focal lengths and principal points (axis centers) in the image.
-The documentation for Swift is [here](https://developer.apple.com/documentation/avfoundation/avcameracalibrationdata/2881135-intrinsicmatrix).
+The intrinsic parameters that represent camera features can generally be represented by a matrix of pixel-based focal lengths and principal points (axis centers) in the image.
+The documentation for Swift is [here](https://developer.apple.com/documentation/avfoundation/avcameracalibrationdata/2881135-intrinsicmatrix).
Since the principal point almost coincides with the image center, this tutorial uses only the focal length.
\section call_objectivec_swift Call Objective-C class from Swift
-Let us consider the Xcode project named `AprilTagLiveCamera` that is part of ViSP source code and located in `$VISP_WS/tutorial/ios/AprilTagLiveCamera`.
+Let us consider the Xcode project named `AprilTagLiveCamera` that is part of ViSP source code and located in `$VISP_WS/tutorial/ios/AprilTagLiveCamera`.
To open this application, if you followed \ref tutorial-install-ios-package simply run:
\verbatim
$ cd $HOME/framework
-$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/ios/AprilTagLiveCamera
+\endverbatim
+Download the content of https://github.com/lagadic/visp/tree/master/tutorial/ios/AprilTagLiveCamera
+and run
+\verbatim
$ open AprilTagLiveCamera -a Xcode
\endverbatim
@@ -73,8 +74,8 @@ and in `ViewController.swift` you have the following code:
Detection and drawing processing is processed in `VispDetector.mm`.
For details on the detection process, see \ref tutorial-detection-apriltag and on how to draw the pose of a tag, see the previous \ref tutorial-detection-apriltag-ios.
-The distance from the iOS device to the marker can be accurately detected if the tag size is properly set.
-The distance can be obtained using the getTranslationVector() method from the homogeneous transformation matrix (`cMo_vec`) representing the pose with rotation (R) and position (t) of the marker in camera coordinates.
+The distance from the iOS device to the marker can be accurately detected if the tag size is properly set.
+The distance can be obtained using the getTranslationVector() method from the homogeneous transformation matrix (`cMo_vec`) representing the pose with rotation (R) and position (t) of the marker in camera coordinates.
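+In C++, this boils down to a couple of lines, as in the following illustrative sketch (the helper function name is
+ours; `cMo` stands for one of the poses returned in `cMo_vec`):
+\code
+#include <cmath>
+#include <visp3/core/vpHomogeneousMatrix.h>
+#include <visp3/core/vpTranslationVector.h>
+
+// Distance in meters between the camera and the tag, given its pose cMo (e.g. cMo_vec[0])
+double tagDistance(const vpHomogeneousMatrix &cMo)
+{
+  vpTranslationVector t = cMo.getTranslationVector(); // position t of the tag in the camera frame
+  return std::sqrt(t[0] * t[0] + t[1] * t[1] + t[2] * t[2]);
+}
+\endcode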
See here for more information vpHomogeneousMatrix class This is achieved in `VispDetector.mm`: diff --git a/doc/tutorial/ios/tutorial-detection-apriltag-ios.dox b/doc/tutorial/ios/tutorial-detection-apriltag-ios.dox index 6c49239ee3..eee437ecac 100644 --- a/doc/tutorial/ios/tutorial-detection-apriltag-ios.dox +++ b/doc/tutorial/ios/tutorial-detection-apriltag-ios.dox @@ -9,11 +9,9 @@ This tutorial follows the \ref tutorial-detection-apriltag and shows how AprilTa In the next section you will find an example that show how to detect tags in a single image. To know how to print an AprilTag marker, see \ref apriltag_detection_print. -Note that all the material (Xcode project and image) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\verbatim -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/ios/StartedAprilTag -\endverbatim +Note that all the material (Xcode project and image) described in this tutorial is part of ViSP source code +(in `tutorial/ios/StartedAprilTag` folder) and could be found in +https://github.com/lagadic/visp/tree/master/tutorial/ios/StartedAprilTag. \section apriltag_detection_basic_ios AprilTag detection and pose estimation (single image) @@ -22,7 +20,10 @@ Let us consider the Xcode project named `StartedAprilTag` that is part of ViSP s To open this application, if you followed \ref tutorial-install-ios-package simply run: \verbatim $ cd $HOME/framework -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/ios/StartedAprilTag +\endverbatim +download the content of https://github.com/lagadic/visp/tree/master/tutorial/ios/StartedAprilTag +and run +\verbatim $ open StartedAprilTag -a Xcode \endverbatim @@ -48,19 +49,19 @@ The Xcode project `StartedAprilTag` contains `ImageDisplay.h` and `ImageDisplay. \subsection apriltag_detection_display_line Display a line -The following function implemented in `ImageDisplay.mm` show how to display a line. +The following function implemented in `ImageDisplay.mm` show how to display a line. \snippet StartedAprilTag/ImageDisplay.mm display line \subsection apriltag_detection_display_frame Display a 3D frame -The following function implemented in `ImageDisplay.mm` show how to display a 3D frame; red line for x, green for y and blue for z axis. +The following function implemented in `ImageDisplay.mm` show how to display a 3D frame; red line for x, green for y and blue for z axis. \snippet StartedAprilTag/ImageDisplay.mm display frame \section apriltag_detection_basic_output Application output -- Now we are ready to build `StartedAprilTag` application using Xcode `"Product > Build"` menu. +- Now we are ready to build `StartedAprilTag` application using Xcode `"Product > Build"` menu. 
- Once build, if you run `StartedAprilTag` application on your device, you should be able to see the following screen shot: \image html img-detection-apriltag-ios-output.jpg diff --git a/doc/tutorial/ios/tutorial-getting-started-iOS.dox b/doc/tutorial/ios/tutorial-getting-started-iOS.dox index cfc45b8b25..00ecd8756f 100644 --- a/doc/tutorial/ios/tutorial-getting-started-iOS.dox +++ b/doc/tutorial/ios/tutorial-getting-started-iOS.dox @@ -1,21 +1,22 @@ /** - \page tutorial-getting-started-iOS Tutorial: How to create a basic iOS application that uses ViSP + \page tutorial-getting-started-iOS Tutorial: How to create a basic iOS application that uses ViSP \tableofcontents - \note We assume that you have `"ViSP for iOS"` either after following \ref tutorial-install-ios-package or \ref tutorial-install-iOS. Following one of these tutorials allows to exploit `visp3.framework` and `opencv2.framework` to build an application for iOS devices. + \note We assume that you have `"ViSP for iOS"` either after following \ref tutorial-install-ios-package or + \ref tutorial-install-iOS. Following one of these tutorials allows to exploit `visp3.framework` and + `opencv2.framework` to build an application for iOS devices. -In this tutorial we suppose that you install `visp3.framework` in a folder named `/ios`. If `` corresponds to `$HOME/framework`, you should get the following: +In this tutorial we suppose that you install `visp3.framework` in a folder named `/ios`. If +`` corresponds to `$HOME/framework`, you should get the following: \verbatim $ ls $HOME/framework/ios opencv2.framework visp3.framework \endverbatim -Note also that all the material (source code and Xcode project) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\verbatim -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/ios/GettingStarted -\endverbatim +Note that all the material (source code and Xcode project) described in this tutorial is part of ViSP source code +(in `tutorial/ios/GettingStarted` folder) and could be found in +https://github.com/lagadic/visp/tree/master/tutorial/ios/GettingStarted. \section getting-started-iOS-create Create a new Xcode project @@ -28,11 +29,12 @@ $ svn export https://github.com/lagadic/visp.git/trunk/tutorial/ios/GettingStart \image html img-getting-started-iOS-options.jpg -- Click on `"Next"` button and select the folder where the new project will be saved. Once done click on `"Create"`. Now you should have something similar to: +- Click on `"Next"` button and select the folder where the new project will be saved. Once done click on `"Create"`. +Now you should have something similar to: \image html img-getting-started-iOS-new.jpg -\section getting-started-iOS-link-visp Linking ViSP framework +\section getting-started-iOS-link-visp Linking ViSP framework Now we need to link `visp3.framework` with the Xcode project. @@ -40,10 +42,12 @@ Now we need to link `visp3.framework` with the Xcode project. \image html img-getting-started-iOS-navigator.jpg -- Use the Finder to drag & drop ViSP and OpenCV frameworks located in `/ios` folder in the left hand panel containing all the project files. +- Use the Finder to drag & drop ViSP and OpenCV frameworks located in `/ios` folder in the left hand +panel containing all the project files. 
\image html img-getting-started-iOS-drag-drop.jpg -- In the dialog box, enable check box `"Copy item if needed"` to ease `visp3.framework` and `opencv2.framework` headers location addition to the build options +- In the dialog box, enable check box `"Copy item if needed"` to ease `visp3.framework` and `opencv2.framework` +headers location addition to the build options \image html img-getting-started-iOS-drag-drop-dialog.jpg @@ -55,19 +59,24 @@ Now we need to link `visp3.framework` with the Xcode project. - Because we will mix Objective-C and ViSP C++ Code, rename `ViewController.m` file into `ViewController.mm` \image html img-getting-started-iOS-rename.jpg -- Now copy/paste `$VISP_WS/visp/tutorial/ios/GettingStarted/GettingStarted/ViewController.mm` file content into `ViewController.mm`. Note that this Objective-C code is inspired from tutorial-homography-from-points.cpp. +- Now copy/paste `$VISP_WS/visp/tutorial/ios/GettingStarted/GettingStarted/ViewController.mm` file content into +`ViewController.mm`. Note that this Objective-C code is inspired from tutorial-homography-from-points.cpp. \include GettingStarted/ViewController.mm -In this sample, we first import the headers to use vpHomography class. Then we create a new function called \c processViSPHomography(). This function is finally called in `viewDibLoad()`. +In this sample, we first import the headers to use vpHomography class. Then we create a new function called +\c processViSPHomography(). This function is finally called in `viewDibLoad()`. - After the previous copy/paste, you should have something similar to \image html img-getting-started-iOS-code.jpg - Now we are ready to build this simple `"Getting Started"` application using Xcode `"Product > Build"` menu. -\note Here it may be possible that you get a build issue \ref getting-started-ios-issue-libxml. Just follow the link to see how to fix this issue. +\note Here it may be possible that you get a build issue \ref getting-started-ios-issue-libxml. Just follow the link +to see how to fix this issue. -- You can now run your code using `"Product > Run"` menu (Simulator or device does not bother because we are just executing code). You should obtain these logs showing that visp code was correctly executed by your iOS project. +- You can now run your code using `"Product > Run"` menu (Simulator or device does not bother because we are just +executing code). You should obtain these logs showing that visp code was correctly executed by your iOS project. \image html img-getting-started-iOS-log.jpg -- if you don't see the output (1) presented in the red rectangle in the previous image, you may click on icon (2) to display `All Output`. +- if you don't see the output (1) presented in the red rectangle in the previous image, you may click on icon (2) to +display `All Output`. \section getting-started-ios-issues Known issues \subsection getting-started-ios-issue-libxml iOS error: libxml/parser.h not found diff --git a/doc/tutorial/ios/tutorial-image-ios.dox b/doc/tutorial/ios/tutorial-image-ios.dox index 30b4df2f8e..21dcf37ba2 100644 --- a/doc/tutorial/ios/tutorial-image-ios.dox +++ b/doc/tutorial/ios/tutorial-image-ios.dox @@ -7,24 +7,35 @@ This tutorial supposes that you have followed the \ref tutorial-getting-started- \section image_ios_intro Introduction -In this tutorial you will learn how to do simple image processing on iOS devices with ViSP. 
This application loads a color image (monkey.png) and allows the user to visualize either this image in grey level, either the image gradients, or either canny edges on iOS simulator or devices. +In this tutorial you will learn how to do simple image processing on iOS devices with ViSP. This application loads a +color image +(monkey.png) +and allows the user to visualize either this image in grey level, either the image gradients, or either canny edges on +iOS simulator or devices. -In ViSP images are carried out using vpImage class. However in iOS, image rendering has to be done using UIImage class that is part of the Core Graphics framework available in iOS. In this tutorial we provide the functions that allow to convert a vpImage to an UIImage and \e vice \e versa. +In ViSP images are carried out using vpImage class. However in iOS, image rendering has to be done using UIImage class +that is part of the Core Graphics framework available in iOS. In this tutorial we provide the functions that allow to +convert a vpImage to an UIImage and \e vice \e versa. -Note that all the material (source code and image) used in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\verbatim -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/ios/StartedImageProc -\endverbatim +Note that all the material (source code and image) described in this tutorial is part of ViSP source code +(in `tutorial/ios/StartedImageProc` folder) and could be found in +https://github.com/lagadic/visp/tree/master/tutorial/ios/StartedImageProc. \section image_ios_app StartedImageProc application -Let us consider the Xcode project named `StartedImageProc` that is part of ViSP source code and located in `$VISP_WS/tutorial/ios/StartedImageProc`. This project is a Xcode `"Single view application"` where we renamed `ViewController.m` into `ViewController.mm`, introduced minor modifications in `ViewController.h` and add monkey.png image. +Let us consider the Xcode project named `StartedImageProc` that is part of ViSP source code and located in +`$VISP_WS/tutorial/ios/StartedImageProc`. This project is a Xcode `"Single view application"` where we renamed +`ViewController.m` into `ViewController.mm`, introduced minor modifications in `ViewController.h` and add +monkey.png +image. To open this application, if you followed \ref tutorial-install-ios-package simply run: \verbatim $ cd $HOME/framework -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/ios/StartedImageProc +\endverbatim +download the content of https://github.com/lagadic/visp/tree/master/tutorial/ios/StartedImageProc +and run +\verbatim $ open StartedImageProc -a Xcode \endverbatim @@ -36,32 +47,38 @@ $ open ~/framework/visp/tutorial/ios/StartedImageProc -a Xcode Here you should see something similar to: \image html img-started-imgproc-ios.jpg -Once opened, you have just to drag & drop ViSP and OpenCV frameworks available in `$HOME/framework/ios` if you followed \ref tutorial-install-ios-package. +Once opened, you have just to drag & drop ViSP and OpenCV frameworks available in `$HOME/framework/ios` if you +followed \ref tutorial-install-ios-package. \image html img-started-imgproc-ios-drag-drop.jpg -In the dialog box, enable check box `"Copy item if needed"` to add `visp3.framework` and `opencv2.framework` to the project. +In the dialog box, enable check box `"Copy item if needed"` to add `visp3.framework` and `opencv2.framework` to the +project. 
\image html img-started-imgproc-ios-drag-drop-dialog.jpg Now you should be able to build and run your application. \section image_ios_convert Image conversion functions -The Xcode project `StartedImageProc` contains `ImageConversion.h` and `ImageConversion.mm` files that implement the functions to convert UIImage to ViSP vpImage and vice versa. +The Xcode project `StartedImageProc` contains `ImageConversion.h` and `ImageConversion.mm` files that implement the +functions to convert UIImage to ViSP vpImage and vice versa. \subsection image_ios_convert_uiimage_vpimage_color UIImage to color vpImage -The following function implemented in \c ImageConversion.mm show how to convert an `UIImage` into a `vpImage` instantiated as a color image. +The following function implemented in \c ImageConversion.mm show how to convert an `UIImage` into a `vpImage` +instantiated as a color image. \snippet tutorial/ios/StartedImageProc/StartedImageProc/ImageConversion.mm vpImageColorFromUIImage \subsection image_ios_convert_uiimage_vpimage_gray UIImage to gray vpImage -The following function implemented in `ImageConversion.mm` show how to convert an `UIImage` into a `vpImage` instantiated as a grey level image. +The following function implemented in `ImageConversion.mm` show how to convert an `UIImage` into a +`vpImage` instantiated as a grey level image. \snippet tutorial/ios/StartedImageProc/StartedImageProc/ImageConversion.mm vpImageGrayFromUIImage \subsection image_ios_convert_vpimage_color_uiimage Color vpImage to UIImage -The following function implemented in `ImageConversion.mm` show how to convert a gray level `vpImage` into an UIImage. +The following function implemented in `ImageConversion.mm` show how to convert a gray level `vpImage` +into an UIImage. \snippet tutorial/ios/StartedImageProc/StartedImageProc/ImageConversion.mm UIImageFromVpImageColor @@ -74,9 +91,11 @@ The following function implemented in `ImageConversion.mm` show how to convert a \section image_ios_output Application output - Now we are ready to build `"StartedImageProc"` application using Xcode `"Product > Build"` menu. -\note Here it may be possible that you get a build issue \ref image_ios-issue-libxml. Just follow the link to see how to fix this issue. +\note Here it may be possible that you get a build issue \ref image_ios-issue-libxml. Just follow the link to see how +to fix this issue. -- Once build, if you run `StartedImageProc` application on your device, you should be able to see the following screen shots. +- Once build, if you run `StartedImageProc` application on your device, you should be able to see the following screen + shots. - Pressing `"load image"` button gives the following result: \image html img-started-imgproc-ios-output-color.jpg diff --git a/doc/tutorial/misc/tutorial-multi-threading.dox b/doc/tutorial/misc/tutorial-multi-threading.dox deleted file mode 100644 index 39c6d7649e..0000000000 --- a/doc/tutorial/misc/tutorial-multi-threading.dox +++ /dev/null @@ -1,208 +0,0 @@ -/** - \page tutorial-multi-threading Tutorial: How to use multi-threading capabilities - \tableofcontents - - -\section multi-threading-into Introduction - -After ViSP 3.0.0, we introduce a new cross-platform vpThread class that allows to execute a function in a separate thread. We also improve vpMutex class useful to protect shared data by mutexes to be cross-platform. - -The vpThread and vpMutex classes are wrappers over native pthread functionality when pthread is available. 
This is the case for all unix-like OS, including OSX and MinGW under Windows. If pthread is not available, we use Windows native functionality instead. - -\subsection multi-threading-into-thread Threading overview - -To use vpThread class you have first to include the corresponding header. -\code -#include -\endcode - -With vpThread the prototype of the function vpThread::Fn that could be executed in a separate thread is the following: -\code -vpThread::Return myFooFunction(vpThread::Args args) -\endcode -where arguments passed to the function are of type vpThread::Args. This function should return a vpThread::Return type. - -Then to create the thread that executes this function, you have just to construct a vpThread object indicating which is the function to execute. -- If you don't want to pass arguments to the function, just do like: -\code -vpThread foo((vpThread::Fn)myFooFunction); -\endcode - -- If you want to pass some arguments to the function, do rather like: -\code -int foo_arg = 3; -vpThread foo((vpThread::Fn)myFooFunction, (vpThread::Args)&foo_arg); -\endcode -This argument could then be exploited in myFooFunction() -\code -vpThread::Return myFooFunction(vpThread::Args args) -{ - int foo_arg = *((int *) args); -} -\endcode - -To illustrate this behavior, see testThread.cpp. - -\subsection multi-threading-into-mutex Mutexes overview - -To use vpMutex class you have first to include the corresponding header. -\code -#include -\endcode - -Then protecting a shared var from concurrent access could be done like: -\code -vpMutex mutex; -int var = 0; - -mutex.lock(); -// var to protect from concurrent access -var = 2; -mutex.unlock(); -\endcode -To illustrate this usage, see testMutex.cpp. - -There is also a more elegant way using vpMutex::vpScopedLock. The previous example becomes: -\code -vpMutex mutex; -int var = 0; - -{ - vpMutex::vpScopedLock lock(mutex); - // var to protect from concurrent access - var = 2; -} -\endcode - -Here, the vpMutex::vpScopedLock constructor locks the mutex, while the destructor unlocks. Using vpMutex::vpScopedLock, the scope of the portion of code that is protected is defined inside the brackets. To illustrate this usage, see tutorial-grabber-opencv-threaded.cpp. - -\section pass-multiple-arguments-return-values Pass multiple arguments and / or retrieve multiple return values - -This section will show you one convenient way to pass multiple arguments to a vpThread and retrieve multiple return values at the end of the computation. This example (testThread2.cpp) uses a functor class to do that. - -Basically, you declare a class that will act like a function by defining the \p operator() that will do the computation in a dedicated thread. In the following toy example, we want to compute the element-wise addition (\f$ v_{add}\left [ i \right ] = v_1 \left [ i \right ] + v_2 \left [ i \right ] \f$) and the element-wise multiplication (\f$ v_{mul}\left [ i \right ] = v_1 \left [ i \right ] \times v_2 \left [ i \right ] \f$) of two vectors. - -Each thread will process a subset of the input vectors and the partial results will be stored in two vectors (one for the addition and the other one for the multiplication). - -\snippet testThread2.cpp functor-thread-example declaration - -The required arguments needed by the constructor are the two input vectors, the start index and the end index that will define the portion of the vector to be processed by the current thread. Two getters are used to retrieve the results at the end of the computation. 
- -Let's see now how to create and initialize the threads: - -\snippet testThread2.cpp functor-thread-example threadCreation - -The pointer to the routine \p arithmThread() called by the thread is defined as the following: - -\snippet testThread2.cpp functor-thread-example threadFunction - -This routine is called by the threading library. We cast the argument passed to the \p thread routine and we call the function that needs to be executed by the thread. - -To get the results: - -\snippet testThread2.cpp functor-thread-example getResults - -After joining the threads, the partial results from one thread can be obtained by a call to the appropriate getter function. - -\warning You cannot create directly the thread as the following: - -\code -threads[i] = vpThread((vpThread::Fn) arithmThread, (vpThread::Args) &functors[i]); -\endcode - -nor as the following: - -\code -threads.push_back(vpThread((vpThread::Fn) arithmThread, (vpThread::Args) &functors[i])); -\endcode - -as theses lines of code create a temporary vpThread object that will be copied to the vector and after destructed. The destructor of the \p vpThread calls automatically the \p join() function and thus it will result that the threads will be created, started and joined sequentially as soon as the temporary \p vpThread object will be destructed. - -\section multi-threading-capture Multi-threaded capture and display - -Note that all the material (source code) described in this section is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/grabber -\endcode - -The following example implemented in tutorial-grabber-opencv-threaded.cpp shows how to implement a multi-threaded application, where image capture is executed in one thread and image display in an other one. The capture is here performed thanks to OpenCV cv::VideoCapture class. It could be easily adapted to deal with other framegrabbers available in ViSP. In tutorial-grabber-v4l2-threaded.cpp you will find the same example using vpV4l2Grabber. To adapt the code to other framegrabbers see \ref tutorial-grabber. - -Hereafter we explain how tutorial-grabber-opencv-threaded.cpp works. - -\subsection multi-threading-capture-declaration Includes and declarations - -First we include all ViSP headers corresponding to the classes we will use; vpImageConvert to convert OpenCV images in ViSP images, vpMutex to protect shared data between the threads, vpThread to create the threads, vpTime to handle the time, vpDisplayX to display images under unix-like OS, and vpDisplayGDI to display the images under Windows. - -Then if OpenCV 2.1.0 or higher is found we include OpenCV highgui.hpp header that brings cv::VideoCapture class that will be used in this example for image capture. - -We declare then the shared data with variable names prefixed by "s_" (\e s_capture_state, indicating if capture is in progress or is stopped, \e s_frame the image that is currently captured and \e s_mutex_capture, the mutex that will be used to protect from concurrent access to these shared variables). -\snippet tutorial-grabber-opencv-threaded.cpp capture-multi-threaded declaration - -\subsection multi-threading-capture-function Capture thread - -Then we implement captureFunction(), the capture function that we want to run in a separate thread. As argument this function receives a reference over cv::VideoCapture object that was created in the \ref multi-threading-capture-main. 
- -\note We notice that cv::VideoCapture is unable to create an instance outside the \ref multi-threading-capture-main. That's why cv::VideoCapture object is passed throw the arguments of the function captureFunction(). With ViSP vp1394TwoGrabber, vp1394CMUGrabber, vpFlyCaptureGrabber, vpV4l2Grabber capture classes it would be possible to instantiate the object in the capture function. - -We check if the capture is able to found a camera thanks to \e cap.isOpened(), and start a 30 seconds capture loop that will fill \e frame_ with the image from the camera. The capture could be stopped before 30 seconds if \e stop_capture_ boolean is turned to true. Once an image is captured, with the mutex we update the shared data. After the while loop, we also update the capture state to capture_stopped to finish the display thread. -\snippet tutorial-grabber-opencv-threaded.cpp capture-multi-threaded captureFunction - -\subsection multi-threading-capture-display-function Display thread - -We implement then displayFunction() used to display the captured images. This function doesn't exploit any argument. Depending on the OS we create a display pointer over the class that we want to use (vpDisplayX or vpDisplayGDI). We enter then in a while loop that will end when the capture is stopped, meaning that the \ref multi-threading-capture-function is finished. - -In the display loop, with the mutex we create a copy of the shared variables \e s_capture_state in order to use if just after. When capture is started we convert the OpenCV cv::mat image into a local ViSP image \e I. Since we access to the shared \e s_frame data, the conversion is protected by the mutex. Then with the first available ViSP image \e I we initialize the display and turn \e display_initialized_ boolean to false indicating that the display is already initialized. Next we update the display with the content of the image. -When we capture is not started, we just sleep for 2 milli-seconds. -\snippet tutorial-grabber-opencv-threaded.cpp capture-multi-threaded displayFunction - -\subsection multi-threading-capture-main Main thread - -The main thread is the one that is implemented in the main() function. -We manage first the command line option "--device " to allow the user to select a specific camera when more then one camera are connected. Then as explained in \ref multi-threading-capture-function we need the create cv::VideoCapture object in the main(). Finally, captureFunction() and displayFunction() are started as two separate threads, one for the capture, an other one for the display using vpThread constructor. - -The call to join() is here to wait until capture and display thread ends to return from the main(). -\snippet tutorial-grabber-opencv-threaded.cpp capture-multi-threaded mainFunction - -Once build, to run this tutorial just run in a terminal: -\code -cd /tutorial/grabber -./tutorial-grabber-opencv-threaded --help -./tutorial-grabber-opencv-threaded --device 0 -\endcode - -where "--device 0" could be avoided since it is the default option. - -\section multi-threading-face-detection Extension to face detection - -Note that all the material (source code) described in this section is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/detection/face -\endcode - -The example given in the previous section \ref multi-threading-capture could be extended to introduce an image processing. 
In this section, we illustrate the case of the face detection described in \ref tutorial-detection-face and implemented in tutorial-face-detector-live.cpp as a single main thread. Now we propose to extend this example using multi-threading where face detection is achieved in a separate thread. The complete source code is given in tutorial-face-detector-live-threaded.cpp. - -Here after we give the changes that we introduce in tutorial-face-detector-live-threaded.cpp to add a new thread dedicated to the face detection. - -\subsection multi-threading-face-detection-function Face detection thread - -The function that does the face detection is implemented in detectionFunction(). -We first instantiate an object of type vpDetectorFace. Then in the while loop, we call the face detection function using face_detector_.detect() when a new image is available. When faces are found, we retrieve the bounding box of the first face that is the largest in the image. We update the shared \e s_face_bbox var with the bounding box. This var is then exploited in the display thread and displayed as a rectangle. -\snippet tutorial-face-detector-live-threaded.cpp face-detection-threaded detectionFunction - -\subsection multi-threading-face-detection-main Main thread - -The main() is modified to call the detectionFunction() in a third thread. -\note Compared to the \ref multi-threading-capture-main used in tutorial-grabber-opencv-threaded.cpp, we modify here the main() to be able to capture images either from a webcam when Video For Linux 2 (V4L2) is available (only on Linux-like OS), or using OpenCV cv::VideoCapture when V4L2 is not available. - -\snippet tutorial-face-detector-live-threaded.cpp face-detection-threaded mainFunction - -To run the binary just open a terminal and run: -\code -cd /tutorial/detection/face -./tutorial-face-detector-live-threaded --help -./tutorial-face-detector-live-threaded -\endcode - -*/ diff --git a/doc/tutorial/misc/tutorial-plotter.dox b/doc/tutorial/misc/tutorial-plotter.dox index 3b244410a8..f52439e907 100644 --- a/doc/tutorial/misc/tutorial-plotter.dox +++ b/doc/tutorial/misc/tutorial-plotter.dox @@ -3,17 +3,15 @@ \page tutorial-plotter Tutorial: Real-time curves plotter tool \tableofcontents -This tutorial focuses on real-time curves drawing. It shows how to modify tutorial-ibvs-4pts.cpp introduced in \ref tutorial-ibvs to draw the evolution of the visual features error and the camera velocity skew vector during an image-based visual servoing. +This tutorial focuses on real-time curves drawing. It shows how to modify tutorial-ibvs-4pts.cpp introduced in +\ref tutorial-ibvs to draw the evolution of the visual features error and the camera velocity skew vector during an +image-based visual servoing. -Note that all the material (source code and image) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/visual-servoing/ibvs -\endcode +Note that all the material (source code and images) described in this tutorial is part of ViSP source code +(in `tutorial/visual-servoing/ibvs` folder) and could be found in +https://github.com/lagadic/visp/tree/master/tutorial/visual-servoing/ibvs. The modified code also available in tutorial-ibvs-4pts-plotter.cpp is the following. - - \include tutorial-ibvs-4pts-plotter.cpp The last image of the drawing is the following: @@ -33,21 +31,28 @@ Include the header of the vpPlot class that allows curves drawing. 
vpPlot plotter(2, 250*2, 500, 100, 200, "Real time curves plotter"); \endcode -Since the plotter opens a windows to display the graphics, the usage of vpPlot class is only possible if ViSP is build with a 3rd party that allows display capabilities; either libx11, GDI, OpenCV or GTK. If this is the case, we create an instance of the vpPlot class. The window that is created will contain two graphics. The windows size will be 500 by 500 pixels. The window position will be (100, 200), and the title "Real time curves plotter". +Since the plotter opens a windows to display the graphics, the usage of vpPlot class is only possible if ViSP is build +with a 3rd party that allows display capabilities; either libx11, GDI, OpenCV or GTK. If this is the case, we create an +instance of the vpPlot class. The window that is created will contain two graphics. The windows size will be 500 by 500 +pixels. The window position will be (100, 200), and the title "Real time curves plotter". \code plotter.setTitle(0, "Visual features error"); plotter.setTitle(1, "Camera velocities"); \endcode -To differentiate the graphics we associate a title to each of them. The first graphic (the one with index 0) will be designed to draw the evolution of the visual features error, while the second (index 1) will be designed to draw the camera velocities. +To differentiate the graphics we associate a title to each of them. The first graphic (the one with index 0) will be +designed to draw the evolution of the visual features error, while the second (index 1) will be designed to draw the +camera velocities. \code plotter.initGraph(0, 8); plotter.initGraph(1, 6); \endcode -Here we initialize the first graphic to be able to plot 8 curves. We recall that we have 4 points, each point has 2 visual features (x and y), that is why there are 8 curves to plot. The second graphic is designed to plot 6 curves corresponding to the camera velocities (3 translation velocities in m/s and 3 rotation velocities in rad/s). +Here we initialize the first graphic to be able to plot 8 curves. We recall that we have 4 points, each point has 2 +visual features (x and y), that is why there are 8 curves to plot. The second graphic is designed to plot 6 curves +corresponding to the camera velocities (3 translation velocities in m/s and 3 rotation velocities in rad/s). \code plotter.setLegend(0, 0, "x1"); @@ -69,7 +74,7 @@ Here we initialize the first graphic to be able to plot 8 curves. We recall that \endcode The previous lines allow to associate a legend to each curve. - + \code #ifdef VISP_HAVE_DISPLAY plotter.plot(0, iter, task.getError()); @@ -77,7 +82,8 @@ The previous lines allow to associate a legend to each curve. #endif \endcode -Once the plotter is initialized, in the servo loop we add at each iteration the corresponding values of the visual features error \f${\bf e}(t) = {\bf s}-{\bf s}^*\f$, and the camera velocities \f${\bf v}_c\f$. +Once the plotter is initialized, in the servo loop we add at each iteration the corresponding values of the visual +features error \f${\bf e}(t) = {\bf s}-{\bf s}^*\f$, and the camera velocities \f${\bf v}_c\f$. \code #ifdef VISP_HAVE_DISPLAY @@ -86,12 +92,14 @@ Once the plotter is initialized, in the servo loop we add at each iteration the #endif \endcode -At the end of the servo loop, we save the data that were plotted in two separate files, one for each graphic. The first line of each text file is the graphic title. Then the coordinates along x,y and z are given in separated columns for each data. 
+At the end of the servo loop, we save the data that were plotted in two separate files, one for each graphic. The first
+line of each text file is the graphic title. Then the coordinates along x,y and z are given in separated columns for
+each data.
\code
vpDisplay::getClick(plotter.I);
\endcode
Before exiting the program we wait for a human mouse click in the plotter window.
-
+
*/
diff --git a/doc/tutorial/misc/tutorial-spc.dox b/doc/tutorial/misc/tutorial-spc.dox
new file mode 100644
index 0000000000..d4df27c12c
--- /dev/null
+++ b/doc/tutorial/misc/tutorial-spc.dox
@@ -0,0 +1,89 @@
+/**
+ \page tutorial-spc Tutorial: Using Statistical Process Control to monitor your signal
+ \tableofcontents
+
+\section tuto-spc-intro Introduction
+
+Statistical Process Control (SPC) is defined as the use of statistical methods to monitor
+if a signal is "in control".
+
+In this tutorial, we will use a Statistical Process Control method to monitor if a
+random signal following a normal distribution is "in control".
+
+\subsection tuto-spc-intro-methods Available methods
+
+The different methods available in ViSP aim at detecting if the mean of a signal is
+changing, either due to an abrupt jump or due to a slow drift.
+
+The different methods that are available are the following:
+- *Exponentially Weighted Moving Average* (EWMA), implemented in the vpStatisticalTestEWMA class.
+- *Hinkley's test*, implemented in the vpStatisticalTestHinkley class.
+- *Mean Adjusted Cumulative Sum* (mean adjusted CUSUM), implemented in the vpStatisticalTestMeanAdjustedCUSUM class.
+- *Shewhart's test*, implemented in the vpStatisticalTestShewhart class.
+- *Sigma test*, implemented in the vpStatisticalTestSigma class.
+
+We refer the reader to the documentation of each class to have more detailed information on
+each method.
+
+\section tuto-spc-tutorial Explanations about the tutorial
+
+\subsection tuto-spc-tutorial-howtorun How to run the tutorial
+
+To see the different options of the tutorial, please run the following commands:
+
+```
+$ cd $VISP_WS/visp-build/tutorial/mean-drift
+$ ./tutorial-meandrift -h
+```
+
+If you run the program without argument, you should see something similar to the following image:
+
+\image html img-tutorial-spc-run.jpg
+
+A Gaussian signal of mean equal to 6 and of standard deviation equal to 2 is generated,
+without any mean drift. The program tells you which method has been chosen in the
+console, and what its parameters are. The monitoring loop stops once an alarm
+is raised. When the alarm is raised, some information about the alarm and the
+test signal(s) + limits of the SPC method are given. Press `Return` to leave the program.
+
+\subsection tuto-spc-tutorial-explained Detailed explanations about the SPC tutorial
+
+For this tutorial, we use the main program tutorial-meandrift.cpp.
+
+It uses the following enumeration to let the user choose which SPC method to use to monitor
+the signal:
+
+\snippet tutorial-meandrift.cpp Enum_For_Test_Choice
+
+The program arguments are parsed and fill the following structure that stores the SPC method
+parameters:
+
+\snippet tutorial-meandrift.cpp Structure_Parameters
+
+It is possible to choose to monitor only upward mean drifts, only downward mean drifts or both.
+To do so, use the `--alarms` option with the name of the alarm(s) you want to monitor.
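+Independently of the ViSP classes listed above, the following standalone sketch illustrates the kind of statistic
+such methods monitor, here a two-sided mean adjusted CUSUM fed with the same signal characteristics as in the
+tutorial (mean 6, standard deviation 2). The parameters `k` and `h` are illustrative choices, not the defaults used
+by vpStatisticalTestMeanAdjustedCUSUM:
+\code
+#include <algorithm>
+#include <iostream>
+#include <random>
+
+int main()
+{
+  const double mean = 6., stdev = 2.;           // "in control" characteristics of the signal
+  const double k = 0.5 * stdev, h = 4. * stdev; // k: half the smallest drift to detect, h: alarm threshold
+
+  std::mt19937 gen(42);
+  std::normal_distribution<double> noise(0., stdev);
+
+  double splus = 0., sminus = 0.;               // upper and lower test signals
+  for (int i = 0; i < 2000; ++i) {
+    double drift = (i < 1000) ? 0. : 0.005 * (i - 1000); // slow upward mean drift after sample 1000
+    double s = mean + drift + noise(gen);
+    splus = std::max(0., splus + (s - mean) - k);   // raises when the mean drifts upward
+    sminus = std::max(0., sminus - (s - mean) - k); // raises when the mean drifts downward
+    if (splus > h || sminus > h) {
+      std::cout << "Mean drift alarm raised at sample " << i << std::endl;
+      break;
+    }
+  }
+  return 0;
+}
+\endcode
+The ViSP classes perform this kind of bookkeeping internally and report the test signal(s) and limits when an alarm
+is raised, as described above.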
+ +First, the plot that will show the data is created in the following section of code: + +\snippet tutorial-meandrift.cpp Plot_Init + +Then, the desired method is created in the following section of code, with the desired parameters: + +\snippet tutorial-meandrift.cpp Test_Creat + +Then, the method is filled with "in control" signals to initialize the expected mean and the standard deviation: + +\snippet tutorial-meandrift.cpp Test_Init + +Then, the monitoring loop is run, where the signal is randomly generated with a potential mean drift +(if desired). This random signal is then tested, and the loop is stopped if an alarm we +want to monitor is raised: + +\snippet tutorial-meandrift.cpp Loop_Monitor + +Finally, some information about why the loop was stopped is displayed in the console: + +\snippet tutorial-meandrift.cpp Failure_Debrief + +The program stops once the `Return` key is pressed. +*/ diff --git a/doc/tutorial/misc/tutorial-trace.dox b/doc/tutorial/misc/tutorial-trace.dox index 59e2241a01..f0484cdfb7 100644 --- a/doc/tutorial/misc/tutorial-trace.dox +++ b/doc/tutorial/misc/tutorial-trace.dox @@ -1,12 +1,14 @@ /** -\page tutorial-trace Tutorial: Debug and trace printings +\page tutorial-trace Tutorial: Debug and trace printings \tableofcontents \section intro_trace Introduction -ViSP allows to introduce trace and debug printings that may help debugging. To this end ViSP provides C or C++ macros that allows to print messages to the standard output std::cout or to std::cerr. The following table summarizes the macro defined in visp3/code/vpDebug.h header. +ViSP allows to introduce trace and debug printings that may help debugging. To this end ViSP provides C or C++ macros +that allows to print messages to the standard output std::cout or to std::cerr. The following table summarizes the +macro defined in visp3/code/vpDebug.h header. ~~~ |----------|-------|-------------------------------------|---------------------------------------| @@ -23,67 +25,76 @@ ViSP allows to introduce trace and debug printings that may help debugging. To t \subsection trace_macro Macros for trace -Macro for tracing vpTRACE(), vpTRACE(level), vpERROR_TRACE(), vpERROR_TRACE(level), -vpIN_FCT() and vpOUT_FCT() -work like printf with carrier return at the end of the string, while -vpCTRACE() and vpCERROR() work like the C++ output streams std::cout -and std::cerr. All these macro print messages only if VP_TRACE macro is defined. Macro that has \e level as parameter like vpTRACE(level) or vpERROR_TRACE(level) use an additional define named VP_DEBUG_MODE. They print only messages if VP_DEBUG_MODE >= \e level. +Macro for tracing vpTRACE(), vpTRACE(level), vpERROR_TRACE(), vpERROR_TRACE(level), +vpIN_FCT() and vpOUT_FCT() +work like printf with carrier return at the end of the string, while +vpCTRACE() and vpCERROR() work like the C++ output streams std::cout +and std::cerr. All these macro print messages only if VP_TRACE macro is defined. Macro that has \e level as parameter +like vpTRACE(level) or vpERROR_TRACE(level) use an additional define named VP_DEBUG_MODE. They print only messages if +VP_DEBUG_MODE >= \e level. \subsection debug_macro Macros for debug Macros for debug vpDEBUG_TRACE(), vpDEBUG_TRACE(level), vpDERROR_TRACE() and vpDERROR_TRACE(level) -work like printf while vpCDEBUG(level) works like the C++ output stream std::cout. -These macro print messages only if VP_DEBUG macro is defined. 
Macro that has \e level as parameter like vpDEBUG_TRACE(level) or vpDERROR_TRACE(level) use an additional define named VP_DEBUG_MODE. They print only messages if VP_DEBUG_MODE >= \e level.
+work like printf while vpCDEBUG(level) works like the C++ output stream std::cout.
+These macros print messages only if the VP_DEBUG macro is defined. Macros that have \e level as parameter like
+vpDEBUG_TRACE(level) or vpDERROR_TRACE(level) use an additional define named VP_DEBUG_MODE. They print only messages
+if VP_DEBUG_MODE >= \e level.
Moreover vpDEBUG_ENABLE(level) can be used to check if a given debug level is active; vpDEBUG_ENABLE(level) is equal to 1 if VP_DEBUG_MODE >= \e level, otherwise vpDEBUG_ENABLE(level) is equal to 0.
\section debug_trace_usage Debug and trace usage in ViSP library
-In ViSP, before an exception is thrown, trace macro are widely used to inform the user that an error occur. This is redundant, since the same trace message in generally associated to the exception that is thrown. Since ViSP 3.1.0, during CMake configuration it is possible to tune debug and trace printings by setting \c ENABLE_DEBUG_LEVEL cmake variable.
+In ViSP, before an exception is thrown, trace macros are widely used to inform the user that an error occurred. This is
+redundant, since the same trace message is generally associated to the exception that is thrown. Since ViSP 3.1.0,
+during CMake configuration it is possible to tune debug and trace printings by setting \c ENABLE_DEBUG_LEVEL cmake
+variable.
-- To turn off debug and trace printings (this is the default), using cmake command just run :
-\code
+- To turn off debug and trace printings (this is the default), using cmake command just run :
+\code
%cmake -DENABLE_DEBUG_LEVEL=0
-\endcode
+\endcode
-- To turn on debug and trace printings with a debug level of 3, using cmake command just run :
-\code
+- To turn on debug and trace printings with a debug level of 3, using cmake command just run :
+\code
%cmake -DENABLE_DEBUG_LEVEL=3
-\endcode
+\endcode
- or using ccmake GUI as shown in the next snapshot:
\image html img-cmake-debug-trace.jpg
-\note When \c ENABLE_DEBUG_LEVEL is set to 0 (this is the default behavior in ViSP), we don't define VP_TRACE and VP_DEBUG macro.
-
-\section example Debug and trace usage in your own project
+\note When \c ENABLE_DEBUG_LEVEL is set to 0 (this is the default behavior in ViSP), we don't define VP_TRACE and
+VP_DEBUG macro.
-Note that all the material (source code) described in this section is part of ViSP source code and could be downloaded using the following command:
+\section example Debug and trace usage in your own project
-\code
-$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/trace
-\endcode
+Note that all the material (source code) described in this tutorial is part of ViSP source code
+(in `tutorial/trace` folder) and could be found in
+https://github.com/lagadic/visp/tree/master/tutorial/trace.
-If you develop a project that uses ViSP library as a 3rd party, there are different ways to benefit from debug and trace macro described previously.
+If you develop a project that uses ViSP library as a 3rd party, there are different ways to benefit from debug and
+trace macros described previously.
-- If ViSP was build with debug and trace enabled using \c cmake \c ENABLE_DEBUG_LEVEL=\, debug and trace are also enabled in your development.
-- If debug and trace were disabled in ViSP (\c ENABLE_DEBUG_LEVEL=0), you can enable debug and trace in your own development either by defining \c VP_DEBUG and/or \c VP_TRACE macro in your code using -\code +- If ViSP was build with debug and trace enabled using \c cmake \c ENABLE_DEBUG_LEVEL=\, debug and trace are + also enabled in your development. +- If debug and trace were disabled in ViSP (\c ENABLE_DEBUG_LEVEL=0), you can enable debug and trace in your own + development either by defining \c VP_DEBUG and/or \c VP_TRACE macro in your code using +\code #define VP_TRACE #define VP_DEBUG #include -\endcode +\endcode either by modifying your \c CMakeLists.txt file by adding an option like: -\code +\code option(ENABLE_DEBUG_MODE "Enable debug and trace printings" ON) if(ENABLE_DEBUG_MODE) add_definitions("-DVP_DEBUG -DVP_TRACE") endif() -\endcode +\endcode The following example also available in tutorial-trace.cpp shows how to use the previous macro. @@ -91,31 +102,33 @@ The following example also available in tutorial-trace.cpp shows how to use the \includelineno tutorial-trace.cpp -\note In the previous example it is important to notice that the following lines have to be put prior to any other ViSP includes: +\note In the previous example it is important to notice that the following lines have to be put prior to any other +ViSP includes: \code #define VP_DEBUG_MODE 2 // Activate debug level 1 and 2 #include \endcode -For example, if you modify the previous example just by including on the top of the file, you will get the following warnings: -\code +For example, if you modify the previous example just by including on the top of the file, you +will get the following warnings: +\code Building CXX object tutorial/trace/CMakeFiles/tutorial-trace.dir/tutorial-trace.cpp.o .../ViSP-code/tutorial/trace/tutorial-trace.cpp:5:1: warning: "VP_DEBUG_MODE" redefined In file included from .../ViSP-build-debug/include/visp3/core/vpImage.h:52, from .../ViSP-code/tutorial/trace/tutorial-trace.cpp:2: .../ViSP-build-debug/include/visp3/core/vpDebug.h:67:1: warning: this is the location of the previous definition -\endcode +\endcode When ViSP library was built without debug and trace the previous example produces the output: -\code +\code %./tutorial-trace Debug level 1 active: 0 Debug level 2 active: 0 Debug level 3 active: 0 -\endcode +\endcode When ViSP is rather build with debug and trace the previous example produces the output: -\code +\code %./tutorial-trace (L0) begin /tmp/tutorial-trace.cpp: main(#9) : main() Debug level 1 active: 1 @@ -133,10 +146,11 @@ Debug level 3 active: 0 (L0) !! /tmp/tutorial-trace.cpp: main(#32) : C++-like error trace (L2) /tmp/tutorial-trace.cpp: main(#35) : C++-like debug trace level 2 (L0) end /tmp/tutorial-trace.cpp: main(#37) : main() -\endcode +\endcode In the previous printings: - the number after "L" indicates the debug or trace level; example (L2) is for level 2. -- the number after "#" indicates the line of the code that produce the printing; example main(#37) means in function main() at line 37. +- the number after "#" indicates the line of the code that produce the printing; example main(#37) means in + function main() at line 37. - the "!!" indicate that the printing is on std::cerr. Others are on std::cout. 
*/ diff --git a/doc/tutorial/python/tutorial-install-python-bindings.dox b/doc/tutorial/python/tutorial-install-python-bindings.dox new file mode 100644 index 0000000000..c7ba39e41d --- /dev/null +++ b/doc/tutorial/python/tutorial-install-python-bindings.dox @@ -0,0 +1,834 @@ +/** + +\page tutorial-install-python-bindings Tutorial: Installing ViSP Python bindings +\tableofcontents + +\section py_bindings_intro Introduction + +ViSP includes an automatic tool to generate Pybind11-based bindings for ViSP. +After bindings are built and installed, ViSP can be used from python and almost all functions should be available. + +The tool that allows to build the bindings is located in the ViSP `modules/python` folder and contains multiple subfolders: + +For the developer or the user interested in modifying the bindings these folders are of interest: +- generator: the Python code to generate pybind11 C++ code, which can then be compiled; +- bindings: the recipe for building the Pybind code, as well as handcrafted binding functions (e.g. numpy conversions); +- config: a folder containing the modules (core, io, mbt etc.) configuration; +- stubs: A way to build "stubs" after compiling the pybind extension and installing the ViSP module. Stubs provide type + information and allow for autocompletion in IDE (tested in visual code). + +For all users these folders are important and illustrate the usage of the binding: + +- test: Python bindings tests. Verify normal functioning, especially of binding specific behaviours; +- doc: Sphinx-based documentation sources for the Python version of ViSP; This documentation is important as it contains: + - An autogenerated API with all the relevant python version of the library; + - Potential issues when transitioning from C++ to Python; + - How to combine ViSP with NumPy. +- examples: some python examples that show how to use ViSP bindings. + +\section py_bindings_build Build Python bindings from source + +The general principle to build the Python bindings is the following: +- Install python3 +- Install or upgrade `pip3` +- Install pybind11 +- Install and create a virtual environment (either through virtualenv or conda) +- Create a ViSP dedicated workspace, get the latest source code and configure ViSP +- When configuring ViSP, make sure that `BUILD_PYTHON_BINDINGS` is `ON` +- To build the bindings, build the target `visp_python_bindings` +- To build the documentation build the target `visp_python_bindings_docs` + +To install the bindings, a virtual environment is required. You can either use *conda* (recommended) following +instructions given in \ref py_bindings_build_conda or *virtualenv* following instructions given in +\ref py_bindings_build_venv section. + +\note For Windows, these instructions have been tested with Visual Studio 17 2022 + +\subsection py_bindings_build_conda Build Python bindings using Conda + +We strongly recommend using Conda to build ViSP Python bindings. Below are instructions for macOS, Ubuntu and Windows environments. + +- If not already done, install [Miniforge](https://github.com/conda-forge/miniforge). + Apply the following instructions according to your environment + + - **A. On macOS**, you may run: + + $ wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh -O /tmp/Miniforge3-MacOSX-arm64.sh + $ zsh /tmp/Miniforge3-MacOSX-arm64.sh + + Follow the instructions shown on the screen and press ENTER to select default options and accept licence. 
+ + You can undo this by running `conda init --reverse $SHELL`? [yes|no] + [no] >>> yes + + - **B. On Ubuntu or other linux-like**, you may rather run: + + $ wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh -O /tmp/Miniforge3-Linux-x86_64.sh + $ bash /tmp/Miniforge3-Linux-x86_64.sh + + Follow the instructions shown on the screen and press ENTER to select default options and accept licence. + + You can undo this by running `conda init --reverse $SHELL`? [yes|no] + [no] >>> yes + + - **C. On Windows**, you may rather download and execute + https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Windows-x86_64.exe + + Select default options and accept licence in the wizard. + +- After the Miniforge installation, we need to apply the changes made to `~/.zshrc` or `~/.bashrc` file. + Miniforge installer modified the file during the installation, that is why you need to run: + + - **A. On macOS**: + + $ source ~/.zshrc + + - **B. On Ubuntu or other linux-like**: + + $ source ~/.bashrc + + - **C. On Windows** + + To use Miniforge, enter the Start menu and select `Miniforge Prompt` + + (base) C:\Users\User> + +- Check installation by retrieving Conda version + + (base) $ conda info + ... + conda version : 23.11.0 + ... + +- Create a Conda environment + + (base) $ conda create -n visp-conda-ws + + Proceed ([y]/n)? y + +- Activate the Conda environment + + (base) $ conda activate visp-conda-ws + (visp-conda-ws) $ + +- Install `pybind11` and all the other ViSP dependencies you wish to enable using conda. + + - **A. On macOS**: + + (visp-conda-ws) $ conda install cmake xorg-libx11 xorg-libxfixes libxml2 'libdc1394>=2.2.6' librealsense libopencv eigen libjpeg-turbo libpng libopenblas llvm-openmp pybind11 + + - **B. On Ubuntu or other linux-like**: + + We recommend this minimal set of dependencies to get the main features of ViSP available: + + (visp-conda-ws) $ conda install cmake xorg-libx11 xorg-libxfixes libxml2 'libdc1394>=2.2.6' librealsense libgomp libopencv eigen libjpeg-turbo libpng mkl-devel pybind11 + + - **C. On Windows**: + + We recommend this minimal set of dependencies to get the main features of ViSP available: + + (visp-conda-ws) C:\Users\User> conda install cmake llvm-openmp openmp libopencv eigen libjpeg-turbo libpng mkl-devel pybind11 + +\note In the previous installation commands you can also specify the Python version if desired by adding +for example `python=3.10` to the previous command lines. + +- Create a ViSP workspace to host source code and the build material + + - **A. On macOS**: + + (visp-conda-ws) $ echo "export VISP_WS=$HOME/visp-ws" >> ~/.zshrc + (visp-conda-ws) $ source ~/.zshrc + (visp-conda-ws) $ mkdir -p $VISP_WS + + - **B. On Ubuntu or other linux-like**: + + (visp-conda-ws) $ echo "export VISP_WS=$HOME/visp-ws" >> ~/.bashrc + (visp-conda-ws) $ source ~/.bashrc + (visp-conda-ws) $ mkdir -p $VISP_WS + + - **C. On Windows**: + + (visp-conda-ws) C:\Users\User> setx VISP_WS "C:\visp-ws" + (visp-conda-ws) C:\Users\User> exit + + Enter the Start menu and select `Miniforge Prompt` to open a new Miniforge Prompt and create the corresponding folder + + (visp-conda-ws) C:\Users\User> mkdir %VISP_WS% + +- Get the latest ViSP source code + + - **A. On macOS** or **B. On Ubuntu or other linux-like**: + + (visp-conda-ws) $ cd $VISP_WS + (visp-conda-ws) $ git clone https://github.com/lagadic/visp + + - **C.
On Windows**: + + (visp-conda-ws) C:\Users\User> cd %VISP_WS% + (visp-conda-ws) C:\visp-ws> git clone https://github.com/lagadic/visp + +- Now configure ViSP for Python bindings + + - **A. On macOS** or **B. On Ubuntu or other linux-like**: + + (visp-conda-ws) $ mkdir visp-build-bindings + (visp-conda-ws) $ cd visp-build-bindings + (visp-conda-ws) $ cmake ../visp -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX + + - **C. On Windows**: + + On Windows, construction is trickier. First you have to disable the bindings to build and install the DLLs + corresponding to the ViSP modules: + + (visp-conda-ws) C:\visp-ws> mkdir visp-build-bindings + (visp-conda-ws) C:\visp-ws> cd visp-build-bindings + (visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake -G "Visual Studio 17 2022" -A "x64" ../visp -DCMAKE_PREFIX_PATH=%CONDA_PREFIX% -DCMAKE_INSTALL_PREFIX=%CONDA_PREFIX%\Library -DVISP_LIB_INSTALL_PATH="lib" -DVISP_BIN_INSTALL_PATH="bin" -DVISP_CONFIG_INSTALL_PATH="cmake" + +- At this point, in the build folder there is the `ViSP-third-party.txt` file in which you should see something similar to the following + + - **A. On macOS** or **B. On Ubuntu or other linux-like**: + + (visp-conda-ws) $ cat ViSP-third-party.txt + ... + Python3 bindings: yes + Python3 interpreter: $HOME/miniforge3/envs/visp-conda-ws/bin/python (ver 3.12.2) + Pybind11: $HOME/miniforge3/envs/visp-conda-ws/share/cmake/pybind11 (2.11.1) + Package version: 3.6.1 + Wrapped modules: core dnn_tracker gui imgproc io klt me sensor ar blob robot visual_features vs vision detection mbt tt tt_mi + Generated input config: $HOME/visp-ws/visp-build-bindings/modules/python/cmake_config.json + ... + + - **C. On Windows**: + + (visp-conda-ws) C:\visp-ws\visp-build-bindings> type ViSP-third-party.txt + ... + Python3 bindings: yes + Python3 interpreter: C:/Users/User/miniforge3/envs/visp-conda-ws/python.exe (ver 3.12.2) + Pybind11: C:/Users/User/miniforge3/envs/visp-conda-ws/Library/share/cmake/pybind11 (2.11.1) + Package version: 3.6.1 + Wrapped modules: core dnn_tracker gui imgproc io klt me sensor ar blob robot visual_features vs vision detection mbt tt tt_mi + Generated input config: C:/visp-ws/visp-build-bindings/modules/python/cmake_config.json + ... + +- Now build ViSP Python bindings in your conda environment + + - **A. On macOS**: + + (visp-conda-ws) $ make -j$(sysctl -n hw.logicalcpu) visp_python_bindings + + - **B. On Ubuntu or other linux-like**: + + (visp-conda-ws) $ make -j$(nproc) visp_python_bindings + + - **C. On Windows**: + + (visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake --build . --config Release --target install --parallel 8 + + At this point, ViSP DLLs should be installed in `%CONDA_PREFIX%/Library/bin`. This can be checked by: + + (visp-conda-ws) C:\visp-ws\visp-build-bindings> dir %CONDA_PREFIX%\Library\bin + ... libvisp_ar361.dll + ... libvisp_blob361.dll + ... libvisp_core361.dll + ... + + Now you can build the bindings: + + (visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake --build . --config Release --target visp_python_bindings --parallel 8 + + If this step fails with an error containing **ImportError: DLL load failed while import visp**, it means that the ViSP DLLs (or the DLLs they depend on, such as OpenCV's) cannot be found by Python. + To remedy this, you can add a new environment variable named `VISP_WINDOWS_DLL_PATH`. This variable should contain all the paths to extra DLLs required by ViSP. + Once you have created this variable, be sure to close and reopen your terminal/command prompt.
+ + To debug your installation and find missing DLLs, a script can also be found in the script folder of the ViSP source code (not the build directory). + This script, `bindings-dll-diagnostic.py`, should be run as administrator. It will output which required DLLs failed to load. You can use this information to update the variable above. + + +- Build documentation for python bindings + + (visp-conda-ws) visp-build-bindings> cmake --build . --config Release --target visp_python_bindings_doc --parallel 8 + + The specific documentation is available by browsing `<build directory>/doc/python/index.html`. + +- Test the Python bindings + + (visp-conda-ws) $ python + Python 3.12.2 | packaged by conda-forge + + >>> import visp + >>> visp.core.ImageRGBa() + + >>> from visp.vs import Servo + >>> Servo() + <_visp.vs.Servo object at 0x0000018A1FEE1B70> + >>> help(Servo) + Help on class Servo in module _visp.vs: + ... + +- Execute Bindings specific unit tests + + (visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake --build . --config Release --target visp_python_bindings_test + +\subsection py_bindings_build_venv Build Python bindings using Python virtualenv + +In this section, we explain how to build the bindings with virtualenv. + +First, you should have Python3 installed: + +- **A. On macOS**: + + % brew install python3 + +- **B. On Ubuntu or other linux-like**: + + Python should already be installed on your system. + +- **C. On Windows**: + + Go to the Microsoft store and install the latest version. + +Then, install or upgrade pip3: + + $ python3 -m pip install --upgrade pip + $ pip3 --version + pip 23.3.1 from /Users/username/Library/Python/3.9/lib/python/site-packages/pip (python 3.9) + +Install virtualenv: + + $ pip install virtualenv + +To use virtualenv as a standard executable, make sure that the bin folder of your python install is in the PATH variable: + +- **A. On macOS**: + + % export PATH=$PATH:$HOME/Library/Python/3.9/bin + +- **B. On Ubuntu or other linux-like**: + + $ export PATH=$PATH:$HOME/.local/bin + +- **C. On Windows**: + + When installing virtualenv, pip should have emitted a warning such as: + + WARNING: The script virtualenv is installed in 'C:\Users\user\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.12_qbz5n2kfra8p0\LocalCache\local-packages\Python312\Scripts' which is not on PATH. + Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location. + + You should add the specified path to your PATH variable in order to use virtualenv. + +Now, if you haven't already, create a ViSP environment: + +- **A. On macOS**: + + % echo "export VISP_WS=$HOME/visp-ws" >> ~/.bashrc + % source ~/.bashrc + % mkdir -p $VISP_WS + % cd $VISP_WS + +- **B. On Ubuntu or other linux-like**: + + $ echo "export VISP_WS=$HOME/visp-ws" >> ~/.bashrc + $ source ~/.bashrc + $ mkdir -p $VISP_WS + $ cd $VISP_WS + +- **C. On Windows**: + + C:\> setx VISP_WS "C:\visp-ws" + + Then, close your `cmd` Command Prompt terminal and open a new one + + C:\> mkdir %VISP_WS% + C:\> cd %VISP_WS% + + +Get the latest ViSP source code: + + % git clone https://github.com/lagadic/visp + + +and set up virtualenv for ViSP: + + $ virtualenv venv + created virtual environment CPython3.9.6.final.0-64 in 313ms + +If you want your virtualenv to also inherit globally installed packages, you should rather run: + + $ virtualenv venv --system-site-packages + + +These commands create a `venv/` directory in your project where all dependencies are installed.
+You need to activate it first though (in every terminal instance where you are working on your project). + +Now, while you are still in your ViSP workspace (VISP_WS), activate your new virtual environment + +- **A. On macOS**: + + % source venv/bin/activate + +- **B. On Ubuntu or other linux-like**: + + $ source venv/bin/activate + +- **C. On Windows**: + + C:\visp-ws> venv\Scripts\activate + +And install Pybind11: + + (venv) $ pip install pybind11[global] + +Your Python environment should now be ready: you can continue and compile the bindings. + +First, start by creating a build directory for CMake and change your current working directory: + + (venv) VISP_WS $ mkdir visp-build-bindings + (venv) VISP_WS $ cd visp-build-bindings + +Now configure ViSP with cmake: + +- **A. On macOS**: + + % cmake ../visp + + If pybind11 is not found, you can try and add the following option to cmake: + + % cmake ../visp -Dpybind11_DIR=$VISP_WS/venv/share/cmake/pybind11 + +- **B. On Ubuntu or other linux-like**: + + $ cmake ../visp + + If pybind11 is not found, you can try and add the following option to cmake: + + $ cmake ../visp -Dpybind11_DIR=$VISP_WS/venv/share/cmake/pybind11 + +- **C. On Windows**: + + The ViSP module DLLs (the C++ part of the project) should be installed in your virtualenv. + + C:\> cmake -G "Visual Studio 17 2022" -A "x64" ../visp -DCMAKE_PREFIX_PATH=%VIRTUAL_ENV% -DCMAKE_INSTALL_PREFIX=%VIRTUAL_ENV%\Library -DVISP_LIB_INSTALL_PATH="lib" -DVISP_BIN_INSTALL_PATH="bin" -DVISP_CONFIG_INSTALL_PATH="cmake" + +At this point, in the `ViSP-third-party.txt` file you should see something similar to: + +- **A. B. On macOS and Linux** + + (venv) $ cat ViSP-third-party.txt + ... + -- Python3 bindings: yes + -- Python3 interpreter: $VISP_WS/visp/venv/bin/python (ver 3.9.6) + -- Pybind11: $VISP_WS/visp/venv/share/cmake/pybind11 (2.11.1) + -- Package version: 3.6.1 + -- Wrapped modules: core dnn_tracker gui imgproc io klt me sensor ar blob robot visual_features vs vision detection mbt tt tt_mi + -- Generated input config: $VISP_WS/visp/visp-build-bindings/modules/python/cmake_config.json + +- **C. On Windows** + + (venv) C:\visp-ws\visp-build-bindings> type ViSP-third-party.txt + ... + -- Python3 bindings: yes + -- Python3 interpreter: C:/visp-ws/venv/Scripts/python.exe (ver 3.12.2) + -- Pybind11: C:\Users\%USERNAME%\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.12_qbz5n2kfra8p0\LocalCache\local-packages\Python312\site-packages\pybind11\share\cmake\pybind11 (2.11.1) + -- Package version: 3.6.1 + -- Wrapped modules: core dnn_tracker gui imgproc io klt me sensor ar blob robot visual_features vs vision detection mbt tt tt_mi + -- Generated input config: C:/visp-ws/visp-build-bindings/modules/python/cmake_config.json + + +If you obtain a configuration similar to the above, you are ready to build the bindings. +If not, check which requirement is missing. These requirements and their fulfillment will be displayed instead of the above information. + +You can now build the Python bindings: + +- **A. On macOS**: + + (venv) $ make -j$(sysctl -n hw.logicalcpu) visp_python_bindings + + +- **B. On Ubuntu or other linux-like**: + + (venv) $ make -j$(nproc) visp_python_bindings + + +- **C. On Windows**: + + (venv) C:\> cmake --build . --config Release --target install + (venv) C:\> cmake --build .
--config Release --target visp_python_bindings + + If the second step fails with an error containing **ImportError: DLL load failed while import visp**, it means that the ViSP DLLs (or the DLLs they depend on, such as OpenCV's) cannot be found by Python. + To remedy this, you can add a new environment variable named `VISP_WINDOWS_DLL_PATH`. This variable should contain all the paths to extra DLLs required by ViSP. + Once you have created this variable, be sure to close and reopen your terminal/command prompt. + + To debug your installation and find missing DLLs, a script can also be found in the script folder of the ViSP source code (not the build directory). + This script, `bindings-dll-diagnostic.py`, should be run as administrator. It will output which required DLLs failed to load. You can use this information to update the variable above. + + +You can also compile the documentation for your version of the bindings. +This documentation is generated with Sphinx and is Python-specific. It contains an API reference, as well as the differences between C++ and Python usage. + + (venv) $ cmake --build . --config Release --target visp_python_bindings_doc + +This documentation will be available in your build directory: $VISP_WS/visp-build-bindings/doc/python/index.html + + +Finally, you can run the Bindings specific tests: + + (venv) $ cmake --build . --config Release --target visp_python_bindings_test + + +The ViSP source code also contains examples: + + +- Launch python mbt example + + (venv) % cd visp/modules/python/examples + (venv) % pip install opencv-python + (venv) % export OPENCV_IO_ENABLE_OPENEXR=1 + (venv) % python synthetic_data_mbt.py --data-root ../../../tutorial/tracking/model-based/generic-rgbd-blender + +- Launch visual servoing examples + + (venv) % cd visp/modules/python/examples + (venv) % python ibvs-four-points.py + (venv) % python pbvs-four-points.py + +\section py_bindings_improvements Improving the bindings + +If a feature, such as a function, class or Python-specific utility, is missing, you should first check that +the Python API (built when generating the Python-specific documentation) does not have that function or class. + +If it is indeed missing, you may raise an issue on GitHub, detailing the feature that is missing and the use case. This issue should have a [Python] tag in the title. + +There are multiple ways to extend and improve the custom Python bindings: + + - Modify and improve the automatic generation tool (advanced, requires Python knowledge); + - Add custom binding functions in the `modules/python/bindings/include` folder (requires C++ knowledge); + + - You can start from predefined bindings in the other header files. + - Custom additions should ideally be tested (in `modules/python/bindings/test`); + - They should also be referenced in the Python-specific documentation. + + - Modify the JSON configuration files to include previously excluded functions. The automatic tool performs a best effort job, but some human knowledge is sometimes required to wrap certain functions. + +For more information and detailed explanations on the different improvement tracks, see the Python-specific documentation. + +\section py_bindings_send_log Submitting an issue on GitHub + +If you encounter a problem during the build, you may raise an issue on GitHub.
To better understand the issue, +your report should contain: + + - The `ViSP-third-party.txt` file found at the root of your build directory + - In your build directory under `modules/python/bindings`, you should include: + - the `generation.log` file: this can help detect at which step the build is failing + - the `src` folder: contains the generated binding code and the preprocessed headers as seen by the generation tool + - The output of your terminal + +\section py_bindings_conda_uninstall How to uninstall Miniforge + +If you followed the steps to install the Python bindings described in \ref py_bindings_build_conda, we give hereafter the recipe +to uninstall Miniforge: + +- First identify where Miniforge is installed + + $ conda env list + # conda environments: + # + base /home/$user/miniforge3 + visp-conda-ws /home/$user/miniforge3/envs/visp-conda-ws + +- Then remove the installation + + $ rm -rf /home/$user/miniforge3 + +- Also clean your `bashrc` file by removing all the lines related to conda + + $ nano ~/.bashrc + + Remove the lines between + # >>> conda initialize >>> + and + # <<< conda initialize <<< + +\section py_bindings_known_errors Known build errors + +When compiling or modifying the bindings, you may encounter errors. + +Here is a non-exhaustive list of errors. + +If you encounter a compilation error, make sure to first try rebuilding after cleaning the CMake cache. +Pybind has been known to generate problems (an error at the pybind include line) that were solved this way. + +\subsection py_bindings_known_errors_buil When building ViSP + +\subsubsection py_bindings_known_errors_build_x11 Cannot build vpDisplayX.cpp + +The following error may occur on macOS during a build: +\verbatim +$HOME/visp_ws/visp/modules/gui/src/display/vpDisplayX.cpp:88:7: error: use of undeclared identifier 'XSetWindowBackground' + XSetWindowBackground(display, window, x_color[color.id]); + ^ +$HOME/visp_ws/visp/modules/gui/src/display/vpDisplayX.cpp:94:7: error: use of undeclared identifier 'XAllocColor' + XAllocColor(display, lut, &xcolor); + ^ +$HOME/visp_ws/visp/modules/gui/src/display/vpDisplayX.cpp:95:7: error: use of undeclared identifier 'XSetForeground' + XSetForeground(display, context, xcolor.pixel); + ^ +$HOME/visp_ws/visp/modules/gui/src/display/vpDisplayX.cpp:98:5: error: use of undeclared identifier 'XClearWindow' + XClearWindow(display, window); + ^ +$HOME/visp_ws/visp/modules/gui/src/display/vpDisplayX.cpp:100:5: error: use of undeclared identifier 'XFreePixmap' + XFreePixmap(display, pixmap); + ^ +\endverbatim + +It occurs when you forgot to install the `xorg-libx11` and `xorg-libxfixes` conda packages. +To fix this build issue: +\verbatim +(visp-conda-ws) $ conda install xorg-libx11 xorg-libxfixes +(visp-conda-ws) $ cd visp-build-bindings +(visp-conda-ws) $ cmake ../visp -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX +(visp-conda-ws) $ make -j$(sysctl -n hw.logicalcpu) visp_python_bindings +\endverbatim + +\subsection py_bindings_known_errors_generation When running the generation tool + +\subsubsection py_bindings_known_errors_parse Cannot parse code + +\verbatim + 100%|##########| 319/319 [00:13<00:00, 23.91hdr/s] + Traceback (most recent call last): + File "", line 198, in _run_module_as_main + There was an error when processing headerC:\visp-ws\test-pr\visp-SamFlt\visp\modules\robot\include\visp3\robot\vpRobotWireFrameSimulator.h See the text log in the build folder for more information.
+ File "", line 88, in _run_code + File "C:\visp-ws\test-pr\visp-SamFlt\venv\Lib\site-packages\visp_python_bindgen\generator.py", line 177, in + main() + File "C:\visp-ws\test-pr\visp-SamFlt\venv\Lib\site-packages\visp_python_bindgen\generator.py", line 174, in main + generate_module(generation_path_src, config_path) + File "C:\visp-ws\test-pr\visp-SamFlt\venv\Lib\site-packages\visp_python_bindgen\generator.py", line 114, in generate_module + raise RuntimeError('There was an exception when processing headers: You should either ignore the faulty header/class, or fix the generator code!') + RuntimeError: There was an exception when processing headers: You should either ignore the faulty header/class, or fix the generator code! +C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Microsoft\VC\v170\Microsoft.CppCommon.targets(254,5): error MSB8066: la build personnalisée de 'C:\visp-ws\test-pr\visp-SamFlt\visp-build-vc17-bindings\CMakeFi +\endverbatim + +This means that there is a parsing error when reading the ViSP header files. + +This may be due to macro definitions, which are not resolved by an actual compiler. +Custom macro definitions are defined in an autogenerated file in the build folder: `modules/python/cmake_config.json`. +To add a custom macro, you should modify the GenerateConfig.cmake file in the modules/python folder in the **source** directory of ViSP. + +For instance, in the function declaration: +\code{.cpp} +static DWORD WINAPI launcher(LPVOID lpParam); +\endcode + +The macros DWORD, WINAPI and LPVOID are defined by MSVC, but are unknown to our tool. +We can define them by adding custom defines in the GenerateConfig.cmake file: +\code +if(WIN32) # WIN32 only defs + # ... + string(JSON json_defines SET ${json_defines} "DWORD" "uint64_t") + string(JSON json_defines SET ${json_defines} "WINAPI" "__stdcall") + string(JSON json_defines SET ${json_defines} "LPVOID" "void*") +endif() +\endcode + +If the issue persists, you can ignore the header by going to the relevant configuration file modules/python/config/module.json, where *module* is the module where parsing fails. +Here, the failing header was "vpRobotWireFrameSimulator.h" in the robot module, so we can edit the `modules/python/config/robot.json` and add: +\code{.json} +{ + "ignore_headers": ["vpRobotWireFrameSimulator.h"] +} +\endcode + +\subsection py_bindings_known_errors_compil When compiling the bindings + +\subsubsection py_bindings_known_errors_abstract Abstract class not detected + +If you have this error: +\verbatim + error: invalid new-expression of abstract class type ‘vpTemplateTrackerMI’ + return new Class{std::forward(args)...}; + In file included from /home/visp_ws/visp_build/modules/python/bindings/src/tt_mi.cpp:13:0: + /home/visp_ws/visp/modules/tracker/tt_mi/include/visp3/tt_mi/vpTemplateTrackerMI.h:46:19: note: because the following virtual functions are pure within ‘vpTemplateTrackerMI’: + class VISP_EXPORT vpTemplateTrackerMI : public vpTemplateTracker +\endverbatim + +You should define the class (here vpTemplateTrackerMI) as pure virtual in the config file (via the flag is_virtual). +This error occurs because some methods are defined as pure virtual in a parent class and are not redefined in this class: pure virtual class detection does not look at the class hierarchy but only at the present class.
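+As an illustration only, marking the class as pure virtual in the corresponding module configuration could look like
+the sketch below. The exact schema of the per-module configuration files is described in the Python-specific
+documentation; the `classes` section and the `tt_mi.json` file name used here are assumptions made for this example:
+\code{.json}
+{
+  "classes": {
+    "vpTemplateTrackerMI": {
+      "is_virtual": true
+    }
+  }
+}
+\endcode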
+ +\subsubsection py_bindings_known_errors_template Template errors + +If you have an issue that looks like: +\verbatim + Consolidate compiler generated dependencies of target _visp + [ 97%] Building CXX object modules/python/bindings/CMakeFiles/_visp.dir/src/core.cpp.o + [ 97%] Building CXX object modules/python/bindings/CMakeFiles/_visp.dir/src/robot.cpp.o + In file included from /usr/include/c++/11/bits/move.h:57, + from /usr/include/c++/11/bits/stl_pair.h:59, + from /usr/include/c++/11/bits/stl_algobase.h:64, + from /usr/include/c++/11/bits/specfun.h:45, + from /usr/include/c++/11/cmath:1935, + from /usr/include/c++/11/math.h:36, + from /home/sfelton/miniconda3/envs/wrapper3.9/include/python3.9/pyport.h:205, + from /home/sfelton/miniconda3/envs/wrapper3.9/include/python3.9/Python.h:50, + from /home/sfelton/.local/include/pybind11/detail/common.h:266, + from /home/sfelton/.local/include/pybind11/attr.h:13, + from /home/sfelton/.local/include/pybind11/detail/class.h:12, + from /home/sfelton/.local/include/pybind11/pybind11.h:13, + from /home/sfelton/software/visp_build/modules/python/bindings/src/robot.cpp:3: + /usr/include/c++/11/type_traits: **In instantiation of ‘struct std::is_move_constructible >’:** + /usr/include/c++/11/type_traits:152:12: required from ‘struct std::__and_ >, std::is_move_assignable > >’ + /usr/include/c++/11/type_traits:157:12: required from ‘struct std::__and_ > >, std::is_move_constructible >, std::is_move_assignable > >’ + /usr/include/c++/11/type_traits:2209:11: required by substitution of ‘template using _Require = std::__enable_if_t >::value> [with _Cond = {std::__not_ > >, std::is_move_constructible >, std::is_move_assignable >}]’ + /usr/include/c++/11/bits/move.h:196:5: required by substitution of ‘template std::_Require >, std::is_move_constructible<_Tp>, std::is_move_assignable<_Tp> > std::swap(_Tp&, _Tp&) [with _Tp = vpImage]’ + /home/sfelton/software/visp-sfelton/modules/core/include/visp3/core/vpImage.h:341:15: required from ‘class vpImage’ + /home/sfelton/software/visp-sfelton/modules/core/include/visp3/core/vpImage.h:369:17: required from here + /usr/include/c++/11/type_traits:1010:52: error: static assertion failed: template argument must be a complete class or an unbounded array + 1010 | **static_assert(std::__is_complete_or_unbounded(__type_identity<_Tp>{}),** +\endverbatim + +You should delete the files in `modules/python/` of the **build** directory. + +\subsubsection py_bindings_known_errors_import_dll ImportError: DLL load failed while importing _visp + +The following error may occur on Windows +\verbatim + Successfully installed visp-3.6.1 + Building Custom Rule C:/visp-ws/visp/modules/python/bindings/CMakeLists.txt + Generating Python stubs with pybind11-stubgen... 
+ Collecting pybind11-stubgen + Using cached pybind11_stubgen-2.5-py3-none-any.whl.metadata (1.7 kB) + Using cached pybind11_stubgen-2.5-py3-none-any.whl (29 kB) + Installing collected packages: pybind11-stubgen + Successfully installed pybind11-stubgen-2.5 + Traceback (most recent call last): + File "", line 198, in _run_module_as_main + File "", line 88, in _run_code + File "C:\Users\User\miniforge3\envs\visp-conda-ws\Lib\site-packages\pybind11_stubgen\__main__.py", line 4, in + main() + File "C:\Users\User\miniforge3\envs\visp-conda-ws\Lib\site-packages\pybind11_stubgen\__init__.py", line 319, in main + run( + File "C:\Users\User\miniforge3\envs\visp-conda-ws\Lib\site-packages\pybind11_stubgen\__init__.py", line 358, in run + QualifiedName.from_str(module_name), importlib.import_module(module_name) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "C:\Users\User\miniforge3\envs\visp-conda-ws\Lib\importlib\__init__.py", line 90, in import_module + return _bootstrap._gcd_import(name[level:], package, level) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "", line 1387, in _gcd_import + File "", line 1360, in _find_and_load + File "", line 1331, in _find_and_load_unlocked + File "", line 921, in _load_unlocked + File "", line 813, in module_from_spec + File "", line 1289, in create_module + File "", line 488, in _call_with_frames_removed + ImportError: DLL load failed while importing _visp: Le module spÚcifiÚ est introuvable. + Traceback (most recent call last): + File "C:\visp-ws\visp\modules\python\stubs\run_stub_generator.py", line 51, in + subprocess.run([sys.executable, '-m', 'pybind11_stubgen', '-o', str(output_root.absolute()), '--ignore-all-errors', '_visp'], check=True) + File "C:\Users\User\miniforge3\envs\visp-conda-ws\Lib\subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['C:\\Users\\User\\miniforge3\\envs\\visp-conda-ws\\python.exe', '-m', 'pybind11_stubgen', '-o', 'C:\\visp-ws\\visp-build-bindings-vc17\\modules\\python\\stubs', '--ignore-all-errors', '_visp']' returned non-zero exit status 1. +C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Microsoft\VC\v170\Microsoft.CppCommon.targets(254,5): error MSB8066: la build personnalisée de 'C:\visp-ws\visp-build-bindings-vc17\CMakeFiles\4a151b60a22ebef29e06fbbd54c3e165\visp_python_bindings_stubs.rule;C:\visp-ws\visp\modules\python\stubs\CMakeLists.txt' s'est arrêtée. Code 1. [C:\visp-ws\visp-build-bindings-vc17\modules\python\stubs\visp_python_bindings_stubs.vcxproj] +\endverbatim + +This error occurs when ViSP DLLs or third-party DLLs are not found. +The first thing to do is to check whether any of the third-parties are installed outside the Conda environment. +To do this, check your `ViSP-third-party.txt` file. +For example, if you have something similar to +\verbatim +(visp-conda-ws) C:\visp-ws\visp-build-bindings> type ViSP-third-party.txt +... + OpenCV: + Version: 4.6.0 + Modules: calib3d core dnn features2d flann gapi highgui imgcodecs imgproc ml objdetect photo stitching video videoio + OpenCV dir: C:/visp-ws/3rdparty/opencv-4.6.0/build + + Mathematics: + Blas/Lapack: yes + \- Use MKL: no + \- Use OpenBLAS: no + \- Use Atlas: no + \- Use Netlib: no + \- Use GSL: no + \- Use Lapack (built-in): yes (ver 3.2.1) + Use Eigen3: yes (ver 3.4.0) + Use OpenCV: yes (ver 4.6.0) +... 
+ Library dirs: + Eigen3 include dir: C:/Users/User/miniforge3/envs/visp-conda-ws/Library/share/eigen3/cmake + OpenCV dir: C:/visp-ws/3rdparty/opencv-4.6.0/build +\endverbatim +you can see that OpenCV 4.6.0 is found outside conda environment, while eigen 3.4.0 is found in the conda environment. +In our case, the error is due to OpenCV DLLs that are not found. + +- **Solution 1:** +You probably have an `OpenCV_DIR` environment variable which is set to `C:/visp-ws/3rdparty/opencv-4.6.0/build`. +A simple solution is to remove this environment variable, close and reopen your `Miniforge Prompt`, and if you haven't +already done so, install OpenCV using conda +\verbatim +(visp-conda-ws) $ conda install libopencv +\endverbatim +and configure again ViSP +\verbatim +(visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake -G "Visual Studio 17 2022" -A "x64" ../visp -DCMAKE_PREFIX_PATH=%CONDA_PREFIX% -DCMAKE_INSTALL_PREFIX=%CONDA_PREFIX%\Library -DVISP_LIB_INSTALL_PATH="lib" -DVISP_BIN_INSTALL_PATH="bin" -DVISP_CONFIG_INSTALL_PATH="cmake" +\endverbatim +At this point you should see that OpenCV is detected in the conda environment +\verbatim +(visp-conda-ws) C:\visp-ws\visp-build-bindings> type ViSP-third-party.txt +... + OpenCV: + Version: 4.9.0 + Modules: calib3d core dnn features2d flann gapi highgui imgcodecs imgproc ml objdetect photo stitching video videoio + OpenCV dir: C:/Users/User/miniforge3/envs/visp-conda-ws/Library/cmake + + Mathematics: + Blas/Lapack: yes + \- Use MKL: no + \- Use OpenBLAS: no + \- Use Atlas: no + \- Use Netlib: no + \- Use GSL: no + \- Use Lapack (built-in): yes (ver 3.2.1) + Use Eigen3: yes (ver 3.4.0) + Use OpenCV: yes (ver 4.9.0) +... + Library dirs: + Eigen3 include dir: C:/Users/User/miniforge3/envs/visp-conda-ws/Library/share/eigen3/cmake + OpenCV dir: C:/Users/User/miniforge3/envs/visp-conda-ws/Library/cmake +\endverbatim +Now you can relaunch the build process +\verbatim +(visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake --build . --config Release --target install --parallel 8 +(visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake --build . --config Release --target visp_python_bindings --parallel 8 +\endverbatim + +- **Solution2:** +If you rather want to use OpenCV build and installed outside your conda environment, you may set +`VISP_WINDOWS_DLL_PATH` environment variable with the path to OpenCV DLLs. In our case it would be: +\verbatim +(visp-conda-ws) $ setx VISP_WINDOWS_DLL_PATH "%VISP_WINDOWS_DLL_PATH%;C:\visp-ws\3rdparty\opencv-4.6.0\build\x64\vc17\bin" +\endverbatim +Then close and reopen your `Miniforge Prompt` and relaunch the build process +\verbatim +(visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake -G "Visual Studio 17 2022" -A "x64" ../visp -DCMAKE_PREFIX_PATH=%CONDA_PREFIX% -DCMAKE_INSTALL_PREFIX=%CONDA_PREFIX%\Library -DVISP_LIB_INSTALL_PATH="lib" -DVISP_BIN_INSTALL_PATH="bin" -DVISP_CONFIG_INSTALL_PATH="cmake" +(visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake --build . --config Release --target install --parallel 8 +(visp-conda-ws) C:\visp-ws\visp-build-bindings> cmake --build . 
--config Release --target visp_python_bindings --parallel 8 +\endverbatim + +\subsection py_bindings_known_errors_import When importing Python in ViSP + +\subsubsection py_bindings_known_errors_same_name Static and member methods have the same name + +If, when importing visp in Python, you encounter this message: +\verbatim + ImportError: overloading a method with both static and instance methods is not supported; error while attempting to bind instance method visp.xxx() -> None +\endverbatim + +Then it means that a class has both a static method and a member method with the same name. You should :ref:`rename either one through the config files `. + +This error may also happen when generating the Python stubs (after the bindings compilation and linking step). + +*/ diff --git a/doc/tutorial/started/tutorial-getting-started-naoqi.dox b/doc/tutorial/started/tutorial-getting-started-naoqi.dox index db6d4a5c14..81e531168c 100644 --- a/doc/tutorial/started/tutorial-getting-started-naoqi.dox +++ b/doc/tutorial/started/tutorial-getting-started-naoqi.dox @@ -1,21 +1,25 @@  /** - \page tutorial-getting-started-naoqi Tutorial: How to create an application that uses ViSP on NAOqi OS + \page tutorial-getting-started-naoqi Tutorial: How to create an application that uses ViSP on NAOqi OS \tableofcontents -We assume in this tutorial that you have successfully cross-compiled ViSP following \ref tutorial-install-crosscompiling-naoqi. -The aim of this tutorial is to explain how to create an application that uses ViSP cross-build for NAOqi OS as third-party and how to run this application on Nao, Romeo or Pepper robots. +We assume in this tutorial that you have successfully cross-compiled ViSP following +\ref tutorial-install-crosscompiling-naoqi. +The aim of this tutorial is to explain how to create an application that uses ViSP cross-build for NAOqi OS as +third-party and how to run this application on Nao, Romeo or Pepper robots. -To illustrate this tutorial, we will consider all the examples that are provided in the tutorial dedicated to images (ie. the one located in \c visp/tutorial/image folder). If you are not familiar with ViSP read first \ref tutorial-getting-started. +To illustrate this tutorial, we will consider all the examples that are provided in the tutorial dedicated to images +(ie. the one located in \c visp/tutorial/image folder). If you are not familiar with ViSP read first +\ref tutorial-getting-started. This tutorial was tested on an Ubuntu 14.04 LTS host computer with: -- Cross Toolchain 2.3.1 Linux 64 (\c ctc-linux32-atom-2.3.1.23) on Romeo robot +- Cross Toolchain 2.3.1 Linux 64 (\c ctc-linux32-atom-2.3.1.23) on Romeo robot - Cross Toolchain 2.4.3 Linux 64 (\c ctc-linux64-atom-2.4.3.28) on Pepper robot - \section started_naoqi_code Create a workspace for your project -We assume that as described in \ref tutorial-install-crosscompiling-naoqi, your workspace dedicated to visp contains the following folders: +We assume that as described in \ref tutorial-install-crosscompiling-naoqi, your workspace dedicated to visp contains +the following folders: \code $ ls $HOME/soft visp @@ -27,11 +31,20 @@ Create a new folder to host the project you want to run on NAOqi OS $ mkdir $HOME/soft/tutorial \endcode -Get existing tutorial/image folder from ViSP source code +Copy existing `tutorial/image` folder from ViSP source code or download it from here +https://github.com/lagadic/visp/tree/master/tutorial/image. 
\code -$ cd $HOME/soft/tutorial -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/image +$ ls $HOME/soft/tutorial/image +CMakeLists.txt monkey.png tutorial-draw-text.cpp tutorial-image-manipulation.cpp +camera.xml monkey.ppm tutorial-event-keyboard.cpp tutorial-image-reader.cpp +chessboard.jpg tutorial-canny.cpp tutorial-export-image.cpp tutorial-image-viewer.cpp +drawingHelpers.cpp tutorial-draw-circle.cpp tutorial-image-colormap.cpp tutorial-undistort.cpp +drawingHelpers.h tutorial-draw-cross.cpp tutorial-image-converter.cpp tutorial-video-manipulation.cpp +memorial.pfm tutorial-draw-frame.cpp tutorial-image-display-scaled-auto.cpp tutorial-viewer.cpp +monkey.bmp tutorial-draw-line.cpp tutorial-image-display-scaled-manu.cpp +monkey.jpeg tutorial-draw-point.cpp tutorial-image-display.cpp +monkey.pgm tutorial-draw-rectangle.cpp tutorial-image-filter.cpp \endcode \section started_naoqi_build Cross-build your project @@ -40,7 +53,7 @@ Create first a build folder associated to your Cross Toolchain like: \code $ cd $HOME/soft/tutorial $ mkdir image-build-ctc-linux64-atom-2.4.3.28 -\endcode +\endcode Configure your project setting \c VISP_DIR and \c CMAKE_TOOLCHAIN_FILE respectively to ViSP and Cross Toolchain location: \code diff --git a/doc/tutorial/started/tutorial-getting-started.dox b/doc/tutorial/started/tutorial-getting-started.dox index a91ea3f6d0..023ef4c227 100644 --- a/doc/tutorial/started/tutorial-getting-started.dox +++ b/doc/tutorial/started/tutorial-getting-started.dox @@ -3,25 +3,29 @@ \page tutorial-getting-started Tutorial: How to create and build a project that uses ViSP and CMake on Unix or Windows \tableofcontents -\note We assume in this tutorial that you have successfully installed ViSP either with an \ref tutorial_install_pkg or with an \ref tutorial_install_src. +\note We assume in this tutorial that you have successfully installed ViSP either with an \ref tutorial_install_pkg or +with an \ref tutorial_install_src. -In this tutorial you will learn how to use ViSP either on unix-like operating system (including OSX, Fedora, Ubuntu, Debian, ...) or on Windows. +In this tutorial you will learn how to use ViSP either on unix-like operating system (including OSX, Fedora, Ubuntu, +Debian, ...) or on Windows. -The easiest way of using ViSP in your project is to use CMake. If you are not familiar with CMake, you can check the tutorial. +The easiest way of using ViSP in your project is to use CMake. If you are not +familiar with CMake, you can check the tutorial. -Note also that all the material (source code and images) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\verbatim -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/image -\endverbatim +Note that all the material (source code and images) described in this tutorial is part of ViSP source code +(in `tutorial/image` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/image. \section started_quick Quick getting started -In this section we show how to build an existing project that uses ViSP as third-party and CMake for the build mechanism. As a use case we will use the `image` project that is part of ViSP tutorials. The source code comes from https://github.com/lagadic/visp/tutorial/image. It contains a set of source files `tutorial-viewer.cpp`, `tutorial-image-viewer.cpp` and a `CMakeLists.txt` file. We show here how to get these files and build them. 
+In this section we show how to build an existing project that uses ViSP as third-party and CMake for the build +mechanism. As a use case we will use the `image` project that is part of ViSP tutorials. The source code comes from +https://github.com/lagadic/visp/tutorial/image. It contains a set of source files `tutorial-viewer.cpp`, +`tutorial-image-viewer.cpp` and a `CMakeLists.txt` file. We show here how to get these files and build them. \subsection started_quick_unix On unix-like operating system -1. If you did \ref tutorial_install_pkg you have to create a workspace. If you did \ref tutorial_install_src jump to point 2. since your workspace should be already created.
+1. If you did \ref tutorial_install_pkg you have to create a workspace. If you did \ref tutorial_install_src jump to +point 2 since your workspace should already exist.
Check if `VISP_WS` environment var exists: \verbatim $ env | grep VISP_WS @@ -33,12 +37,12 @@ $ source ~/.bashrc $ mkdir -p $VISP_WS \endverbatim -2. Get the source code using Subversion (`svn`): +2. Copy the source code from the ViSP `tutorial/image` folder \verbatim -$ sudo apt-get install subversion $ cd $VISP_WS -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/image +$ cp -p -r visp/tutorial/image . \endverbatim +or download it from https://github.com/lagadic/visp/tree/master/tutorial/image. 3. Create a build folder \verbatim @@ -80,15 +84,13 @@ C:\> setx VISP_WS=C:\visp-ws C:\> exit \endverbatim -2. Get the source code in your workspace either using Subversion (`svn`), either copying the source code from `%%VISP_WS%/tutorial/image` folder if you follow one of the \ref tutorial_install_src tutorials, or downloading the source from https://github.com/lagadic/visp/tutorial/image:
-With Subversion: -\verbatim -C:\> svn export https://github.com/lagadic/visp.git/trunk/tutorial/image -\endverbatim -Or by copy from ViSP source code +2. Get the source code in your workspace
+You can either copy the source code from `%%VISP_WS%/tutorial/image` folder if you followed one of the +\ref tutorial_install_src tutorials \verbatim C:\> xcopy /E /I %VISP_WS%\visp\tutorial\image %VISP_WS%\image \endverbatim +or download it from https://github.com/lagadic/visp/tutorial/image. 3. Create a build folder \verbatim @@ -161,7 +163,8 @@ C:\> setx VISP_WS=C:\visp-ws C:\> exit \endverbatim -Enter `VISP_WS` folder and create a new folder let say `started` that will contain your first project that uses ViSP as third-party: +Enter `VISP_WS` folder and create a new folder, let's say `started`, that will contain your first project that uses ViSP +as third-party: - On unix-like operating system
\verbatim @@ -177,9 +180,11 @@ C:\> mkdir started \subsection image_code Get tutorial-viewer.cpp file -Let's start to write our first C++ example to see how to read an image and open a window to display the image with ViSP. This example is provided in tutorial-viewer.cpp example and given below. +Let's start to write our first C++ example to see how to read an image and open a window to display the image with ViSP. +This example is provided in tutorial-viewer.cpp example and given below. -Open your favorite editor and copy/paste the content of this example in `VISP_WS/started/tutorial-viewer.cpp` source file. +Open your favorite editor and copy/paste the content of this example in `VISP_WS/started/tutorial-viewer.cpp` source +file. The code to copy/paste is the following: @@ -188,7 +193,10 @@ The code to copy/paste is the following: Here is the detailed explanation of the source, line by line: \snippet tutorial-viewer.cpp Include display -Include all the headers for image viewers. The two first one are for Windows systems. They require that Direct 3D or the \e Graphical \e Device \e Interface (\e GDI) coming with the installation of Visual Studio are available. The third one needs GTK that is cross-platform. The fourth is for unix-like systems and requires that \e libX11 is available. The last one is also cross-platform and requires that OpenCV is available. +Include all the headers for image viewers. The two first one are for Windows systems. They require that Direct 3D or +the \e Graphical \e Device \e Interface (\e GDI) coming with the installation of Visual Studio are available. The +third one needs GTK that is cross-platform. The fourth is for unix-like systems and requires that \e libX11 is +available. The last one is also cross-platform and requires that OpenCV is available. \snippet tutorial-viewer.cpp Include io Include the header that allows to read/write PGM, PPM, PNG and JPEG images from the disk using vpImageIo class. @@ -197,10 +205,12 @@ Include the header that allows to read/write PGM, PPM, PNG and JPEG images from Create an instance of a color image where each pixel is coded in RGBa. \snippet tutorial-viewer.cpp vpImage reading -The image `I` is initialized by reading an image file from the disk. If the image format is not supported we throw an exception. +The image `I` is initialized by reading an image file from the disk. If the image format is not supported we throw an +exception. \snippet tutorial-viewer.cpp vpDisplay construction -Create an instance of an image display window for image `I`. The first viewer that is available is used. Here we create the link between the image `I` and the display `d`. Note that an image can only have one display. +Create an instance of an image display window for image `I`. The first viewer that is available is used. Here we create +the link between the image `I` and the display `d`. Note that an image can only have one display. \snippet tutorial-viewer.cpp vpDisplay set title The title of the display is then set to "My image". @@ -213,7 +223,8 @@ Here we handle mouse events. We are waiting for a blocking mouse click to end th \subsection image_cmake Get CMakeLists.txt file -Now you have to create a `CMakeLists.txt` file that gives the instructions on how to build `tutorial-viewer.cpp` example. A minimalistic `CMakeLists.txt` should contain the following lines. +Now you have to create a `CMakeLists.txt` file that gives the instructions on how to build `tutorial-viewer.cpp` +example. 
A minimalistic `CMakeLists.txt` should contain the following lines. Open your editor and copy/paste the following lines in `VISP_WS/started/CMakeLists.txt` file. \code @@ -249,7 +260,8 @@ endif(VISP_FOUND) add_executable(tutorial-viewer tutorial-viewer.cpp) \endcode -where `VISP_USE_FILE` variable is set to the full path to `VISPUse.cmake` file that contains all the CMake material that allow to build your project with ViSP. In other terms, the line +where `VISP_USE_FILE` variable is set to the full path to `VISPUse.cmake` file that contains all the CMake material +that allows building your project with ViSP. In other terms, the line \code include(${VISP_USE_FILE}) \endcode @@ -263,16 +275,15 @@ will include the following lines to your `CMakeFile.txt` Get `monkey.ppm` image and copy it to `VISP_WS/started` either: - copying it from ViSP source code; the file is in `VISP_WS/tutorial/image/monkey.ppm` -- using Subversion: -\verbatim -svn export https://github.com/lagadic/visp.git/trunk/tutorial/image/monkey.ppm -\endverbatim -- by copy/paste from GitHub using the Raw button +- downloading it from https://github.com/lagadic/visp/blob/master/tutorial/image/ or by copy/paste from GitHub using + the Raw button \subsection image_unix On unix-like operating system -In this section we supppose that you have created a folder `$VISP_WS/started` that contains `CMakeLists.txt`, `tutorial-viewer.cpp` and `monkey.ppm` files. +In this section we suppose that you have created a folder `$VISP_WS/started` that contains `CMakeLists.txt`, +`tutorial-viewer.cpp` and `monkey.ppm` files. \subsubsection image_unix_build_folder Create a build folder diff --git a/doc/tutorial/tracking/tutorial-tracking-blob.dox index 496de1e08c..98103cdf0c 100644 --- a/doc/tutorial/tracking/tutorial-tracking-blob.dox +++ b/doc/tutorial/tracking/tutorial-tracking-blob.dox @@ -5,9 +5,12 @@ \section tracking_blob_intro Introduction -With ViSP you can track a blob using either vpDot or vpDot2 classes. By blob we mean a region of the image that has the same gray level. The blob can be white on a black background, or black on a white background. +With ViSP you can track a blob using either vpDot or vpDot2 classes. By blob we mean a region of the image that has the +same gray level. The blob can be white on a black background, or black on a white background. -In this tutorial we focus on vpDot2 class that provides more functionalities than vpDot class. As presented in section \ref tracking_blob_auto, it allows especially to automize the detection of blobs that have the same characteristics than a reference blob.
The next videos show the result of ViSP blob tracker on two different objects: @@ -19,21 +22,22 @@ The next videos show the result of ViSP blob tracker on two different objects: \endhtmlonly -All the material (source code and images) described in this tutorial is part of ViSP source code and could be downloaded using the following command: - -\code -$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/tracking/blob -\endcode +Note that all the material (source code and images) described in this tutorial is part of ViSP source code +(in `tutorial/tracking/blob` folder) and could be found in https://github.com/lagadic/visp/tree/master/tutorial/tracking/blob. \section tracking_blob_tracking Blob tracking -In the next subsections we explain how to achieve this kind of tracking, first using a firewire live camera, then using a v4l2 live camera that can be an usb camera, or a Raspberry Pi camera module. +In the next subsections we explain how to achieve this kind of tracking, first using a firewire live camera, then using +a v4l2 live camera that can be an usb camera, or a Raspberry Pi camera module. \subsection live-firewire From a firewire live camera -The following code also available in tutorial-blob-tracker-live-firewire.cpp file provided in ViSP source code tree allows to grab images from a firewire camera and track a blob. The initialisation is done with a user mouse click on a pixel that belongs to the blob. - -To acquire images from a firewire camera we use vp1394TwoGrabber class on unix-like systems or vp1394CMUGrabber class under Windows. These classes are described in the \ref tutorial-grabber. +The following code also available in tutorial-blob-tracker-live-firewire.cpp file provided in ViSP source code tree +allows to grab images from a firewire camera and track a blob. The initialisation is done with a user mouse click on +a pixel that belongs to the blob. + +To acquire images from a firewire camera we use vp1394TwoGrabber class on unix-like systems or vp1394CMUGrabber class +under Windows. These classes are described in the \ref tutorial-grabber. \include tutorial-blob-tracker-live-firewire.cpp @@ -42,10 +46,11 @@ Here after we explain the new lines that are introduced. \snippet tutorial-blob-tracker-live-firewire.cpp Construction -Then we are modifying some default settings to allow drawings in overlay the contours pixels and the position of the center of gravity with a thickness of 2 pixels. +Then we are modifying some default settings to allow drawings in overlay the contours pixels and the position of the +center of gravity with a thickness of 2 pixels. \snippet tutorial-blob-tracker-live-firewire.cpp Setting -Then we are waiting for a user initialization throw a mouse click event in the blob to track. +Then we are waiting for a user initialization throw a mouse click event in the blob to track. \snippet tutorial-blob-tracker-live-firewire.cpp Init The tracker is now initialized. The tracking can be performed on new images: @@ -53,25 +58,33 @@ The tracker is now initialized. The tracking can be performed on new images: \subsection live-v4l2 From a v4l2 live camera -The following code also available in tutorial-blob-tracker-live-v4l2.cpp file provided in ViSP source code tree allows to grab images from a camera compatible with video for linux two driver (v4l2) and track a blob. Webcams or more generally USB cameras, but also the Raspberry Pi Camera Module can be considered. 
+The following code also available in tutorial-blob-tracker-live-v4l2.cpp file provided in ViSP source code tree allows +to grab images from a camera compatible with video for linux two driver (v4l2) and track a blob. Webcams or more +generally USB cameras, but also the Raspberry Pi Camera Module can be considered. -To acquire images from a v4l2 camera we use vpV4l2Grabber class on unix-like systems. This class is described in the \ref tutorial-grabber. +To acquire images from a v4l2 camera we use vpV4l2Grabber class on unix-like systems. This class is described in the +\ref tutorial-grabber. \include tutorial-blob-tracker-live-v4l2.cpp -The code is the same than the one presented in the previous subsection, except that here we use the vpV4l2Grabber class to grab images from usb cameras. Here we have also modified the while loop in order to catch an exception when the tracker fail: +The code is the same than the one presented in the previous subsection, except that here we use the vpV4l2Grabber +class to grab images from usb cameras. Here we have also modified the while loop in order to catch an exception when +the tracker fail: \code try { blob.track(I); } catch(...) { } \endcode -If possible, it allows the tracker to overcome a previous tracking failure (due to blur, blob outside the image,...) on the next available images. +If possible, it allows the tracker to overcome a previous tracking failure (due to blur, blob outside the image,...) +on the next available images. \section tracking_blob_auto Blob auto detection and tracking -The following example also available in tutorial-blob-auto-tracker.cpp file provided in ViSP source code tree shows how to detect blobs in the first image and then track all the detected blobs. This functionality is only available with vpDot2 class. Here we consider an image that is provided in ViSP source tree. +The following example also available in tutorial-blob-auto-tracker.cpp file provided in ViSP source code tree shows +how to detect blobs in the first image and then track all the detected blobs. This functionality is only available +with vpDot2 class. Here we consider an image that is provided in ViSP source tree. \include tutorial-blob-auto-tracker.cpp @@ -84,17 +97,21 @@ And here is the detailed explanation of the source : First we create an instance of the tracker. \snippet tutorial-blob-auto-tracker.cpp Construction -Then, two cases are handled. The first case, when \c learn is set to \c true, consists in learning the blob characteristics. The user has to click in a blob that serves as reference blob. The size, area, gray level min and max, and some precision parameters will than be used to search similar blobs in the whole image. +Then, two cases are handled. The first case, when \c learn is set to \c true, consists in learning the blob +characteristics. The user has to click in a blob that serves as reference blob. The size, area, gray level min +and max, and some precision parameters will than be used to search similar blobs in the whole image. \snippet tutorial-blob-auto-tracker.cpp Learn -If you have an precise idea of the dimensions of the blob to search, the second case consists is settings the reference characteristics directly. +If you have an precise idea of the dimensions of the blob to search, the second case consists is settings the +reference characteristics directly. 
Once the blob characteristics are known, to search similar blobs in the image is simply done by:

\snippet tutorial-blob-auto-tracker.cpp Search

-Here \c blob_list contains the list of the blobs that are detected in the image \c I. When learning is enabled, the blob that is tracked is not in the list of auto detected blobs. We add it to the end of the list:
+Here \c blob_list contains the list of the blobs that are detected in the image \c I. When learning is enabled,
+the blob that is tracked is not in the list of auto-detected blobs. We add it to the end of the list:

\snippet tutorial-blob-auto-tracker.cpp Add learned dot

diff --git a/doc/tutorial/tracking/tutorial-tracking-keypoint.dox b/doc/tutorial/tracking/tutorial-tracking-keypoint.dox
index 8b8dc838cf..d898ab3bc5 100644
--- a/doc/tutorial/tracking/tutorial-tracking-keypoint.dox
+++ b/doc/tutorial/tracking/tutorial-tracking-keypoint.dox
@@ -5,17 +5,17 @@

\section tracking_keypoint_intro Introduction

-With ViSP it is possible to track keypoints using OpenCV KLT tracker, an implementation of the Kanade-Lucas-Tomasi feature tracker.
+With ViSP it is possible to track keypoints using the OpenCV KLT tracker, an implementation of the Kanade-Lucas-Tomasi
+feature tracker.

-All the material (source code and video) described in this tutorial is part of ViSP source code and could be downloaded using the following command:
-
-\code
-$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/tracking/keypoint
-\endcode
+Note that all the material (source code and videos) described in this tutorial is part of the ViSP source code
+(in the `tutorial/tracking/keypoint` folder) and can be found at
+https://github.com/lagadic/visp/tree/master/tutorial/tracking/keypoint.

\section tracking_keypoint_klt KLT tracker

-The following example code available in tutorial-klt-tracker.cpp shows how to use ViSP vpKltOpencv class to track KLT keypoints. This class is a wrapper over the OpenCV KLT tracker implementation.
+The following example code, available in tutorial-klt-tracker.cpp, shows how to use the ViSP vpKltOpencv class to
+track KLT keypoints. This class is a wrapper over the OpenCV KLT tracker implementation.

\include tutorial-klt-tracker.cpp

@@ -25,30 +25,38 @@ The video shows the result of the tracking:
\endhtmlonly

-The previous example can be run without command line options. In that case, keypoints are automatically detected before tracking.
+The previous example can be run without command line options. In that case, keypoints are automatically detected before
+tracking.
\code
$ ./tutorial-klt-tracker
\endcode

-It can also be run with [--init-by-click] option. In that case, the user can select a set of keypoints to track with a left mouse click. A right mouse click stops the keypoints selection and allows to start the tracking.
+It can also be run with the [--init-by-click] option. In that case, the user can select a set of keypoints to track
+with a left mouse click. A right mouse click stops the keypoint selection and starts the tracking.
\code
$ ./tutorial-klt-tracker --init-by-click
\endcode

-Here is the line by line explanation of the source :
+Here is the line by line explanation of the source:

\snippet tutorial-klt-tracker.cpp Include

-We include here the headers that define the corresponding classes. vpImageConvert class will be used to convert ViSP images implemented in vpImage class into OpenCV
-cv::Mat structures used as an entry by the KLT tracker. Then we include the header of vpKltOpencv class which is the wrapper over OpenCV KLT tracker implementation.
+Here we include the headers that define the corresponding classes. The vpImageConvert class will be used to convert
+ViSP images, implemented in the vpImage class, into OpenCV cv::Mat structures used as input by the KLT tracker.
+Then we include the header of the vpKltOpencv class, which is the wrapper over the OpenCV KLT tracker implementation.

-We need also to include a device to display the images. We retain vpDisplayOpenCV that works on Unix and Windows since OpenCV is mandatory by the tracker. Finally we include vpVideoReader header that will be used to read an mpeg input stream.
+We also need to include a device to display the images. We retain vpDisplayOpenCV, which works on Unix and Windows
+since OpenCV is mandatory for the tracker. Finally we include the vpVideoReader header that will be used to read an
+mpeg input stream.

-At the beginning of the main() function, we use the following macro to ensure that OpenCV requested by the tracker is available. Note that OpenCV will also be used to render the images and read the input video stream.
+At the beginning of the main() function, we use the following macro to ensure that OpenCV, requested by the tracker,
+is available. Note that OpenCV will also be used to render the images and read the input video stream.

\snippet tutorial-klt-tracker.cpp Check 3rd party

-The program starts by the creation of a vpVideoReader instance able to extract all the images of the video file \c video-postcard.mpeg. Here, the video should be in the same folder than the binary.
+The program starts with the creation of a vpVideoReader instance able to extract all the images of the video file
+\c video-postcard.mpeg. Here, the video should be in the same folder as the binary.

\snippet tutorial-klt-tracker.cpp Create reader

@@ -60,19 +68,21 @@ This image \c I is then converted into \c cvI, an OpenCV image format that will

\snippet tutorial-klt-tracker.cpp Convert to OpenCV image

-We also create a window associated to \c I, at position (0,0) in the screen, with "Klt tracking" as title, and display image \c I.
+We also create a window associated with \c I, at position (0,0) on the screen, with "Klt tracking" as title, and
+display image \c I.

\snippet tutorial-klt-tracker.cpp Init display
-
+
From now we have to create an instance of the tracker and set the parameters of the Harris keypoint detector.

\snippet tutorial-klt-tracker.cpp Create tracker
-
+
The tracker is then initialized on \c cvI image.

\snippet tutorial-klt-tracker.cpp Init tracker

-With the next line the user can know how many keypoints were detected automatically or selected by the user during initialization.
+With the next line the user can know how many keypoints were detected automatically or selected by the user during
+initialization.

\snippet tutorial-klt-tracker.cpp How many features

@@ -82,7 +92,8 @@ To detect more keypoints, you may decrease the quality parameter set with the fo

\snippet tutorial-klt-tracker.cpp Quality

-Until the end of the video, we get \c I the next image in ViSP format, display and convert it in OpenCV format. Then we track the Harris keypoints using KLT tracker before displaying the keypoints that are tracked with a red cross.
+Until the end of the video, we get \c I, the next image in ViSP format, display it and convert it to OpenCV format.
+Then we track the Harris keypoints using the KLT tracker before displaying the tracked keypoints with a red cross.

\snippet tutorial-klt-tracker.cpp While loop
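In substance, the loop referenced by that last snippet comes down to something like the following sketch. Variable
names (\c reader, \c I, \c cvI, \c tracker) follow the tutorial; the display color and thickness are assumptions for
illustration, not values taken from tutorial-klt-tracker.cpp.

\code
cv::Mat cvI;
while (!reader.end()) {
  reader.acquire(I);                // get the next image in ViSP format
  vpDisplay::display(I);            // display it
  vpImageConvert::convert(I, cvI);  // convert it to OpenCV format
  tracker.track(cvI);               // track the Harris keypoints with the KLT tracker
  tracker.display(I, vpColor::red); // draw the tracked keypoints as red crosses
  vpDisplay::flush(I);
}
\endcode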
@@ -92,7 +103,10 @@ We are waiting for a mouse click event on image \c I to end the program.

\section tracking_keypoint_klt_init KLT tracker with re-initialisation

-Once initialized, the number of tracked features decreases over the time. Depending on a criteria, it may sense to detect and track new features online. A possible criteria is for example to compare the number of currently tracked features to the initial number of detected features. If less than a given percentage of features are tracked, you can start a new detection.
+Once initialized, the number of tracked features decreases over time. Depending on a criterion, it may make sense to
+detect and track new features online. A possible criterion is for example to compare the number of currently tracked
+features to the initial number of detected features. If less than a given percentage of the features are still
+tracked, you can start a new detection.

To get the number of detected or tracked features just call:

@@ -102,15 +116,16 @@ To get the number of detected or tracked features just call:

Then the idea is to add the previously tracked features to the list of features that are detected.

-The example tutorial-klt-tracker-with-reinit.cpp shows how to do that. In that example we start a new detection on frame 25. Compared to the previous code available in tutorial-klt-tracker.cpp we add the following lines:
+The example tutorial-klt-tracker-with-reinit.cpp shows how to do that. In that example we start a new detection on
+frame 25. Compared to the previous code available in tutorial-klt-tracker.cpp, we add the following lines:

\code
if (reader.getFrameIndex() == 25) {
  std::cout << "Re initialize the tracker" << std::endl;
-#if (VISP_HAVE_OPENCV_VERSION >= 0x020408)
+
  // Save of previous features
  std::vector<cv::Point2f> prev_features = tracker.getFeatures();
-
+
  // Start a new feature detection
  tracker.initTracking(cvI);
  std::vector<cv::Point2f> new_features = tracker.getFeatures();
@@ -122,7 +137,7 @@ The example tutorial-klt-tracker-with-reinit.cpp shows how to do that. In that e
      // Test if a previous feature is not redundant with one of the newly detected
      is_redundant = false;
      for (size_t j=0; j < new_features.size(); j++){
-        distance = sqrt(vpMath::sqr(new_features[j].x-prev_features[i].x)
+        distance = sqrt(vpMath::sqr(new_features[j].x-prev_features[i].x)
                        + vpMath::sqr(new_features[j].y-prev_features[i].y));
        if(distance < minDistance_){
          is_redundant = true;
@@ -135,9 +150,6 @@ The example tutorial-klt-tracker-with-reinit.cpp shows how to do that. In that e
        //std::cout << "Add previous feature with index " << i << std::endl;
        tracker.addFeature(prev_features[i]);
      }
-#else
-      ...
-#endif
}
// Track the features
tracker.track(cvI);
@@ -146,7 +158,9 @@ The example tutorial-klt-tracker-with-reinit.cpp shows how to do that. In that e

In this code we do the following:
- save the features that are tracked until now
- initialize the tracker to detect new features
-- parse all the saved features and compare them to the newly detected features. If a previous feature is close in terms of geometric distance to a newly detected one, it is rejected (in our case less than 2 pixels). If not, it is added to the list of detected features.
+- parse all the saved features and compare them to the newly detected features. If a previous feature is close, in
+  terms of geometric distance, to a newly detected one, it is rejected (in our case, less than 2 pixels). If not, it
+  is added to the list of detected features.
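If you prefer a criterion based on the number of remaining features rather than a fixed frame index, the test could
look like the sketch below. The 50% threshold and the \c nb_initial variable are illustrative choices, and the sketch
assumes the feature-count getter mentioned above is vpKltOpencv::getNbFeatures(); the merge step itself is the one
shown in the previous listing.

\code
// Right after the initial tracker.initTracking(cvI), remember how many features were found
int nb_initial = tracker.getNbFeatures();

// Inside the tracking loop, before tracker.track(cvI)
if (tracker.getNbFeatures() < 0.5 * nb_initial) {
  // Save the features that are still tracked
  std::vector<cv::Point2f> prev_features = tracker.getFeatures();
  // Start a new feature detection
  tracker.initTracking(cvI);
  // ... then add the non-redundant previous features back with tracker.addFeature(),
  //     exactly as in the excerpt above
}
\endcode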
\section tracking_keypoint_next Next tutorial

You are now ready to see the next \ref tutorial-tracking-me.

diff --git a/doc/tutorial/tracking/tutorial-tracking-mb-deprected.dox b/doc/tutorial/tracking/tutorial-tracking-mb-deprected.dox
index 087ac77a88..438082764d 100644
--- a/doc/tutorial/tracking/tutorial-tracking-mb-deprected.dox
+++ b/doc/tutorial/tracking/tutorial-tracking-mb-deprected.dox
@@ -6,46 +6,69 @@

\section mbdep_intro Introduction

\warning
-This tutorial can be considered obsolete since ViSP 3.1.0 version as we have introduced a generic tracker (vpMbGenericTracker) that can replace the vpMbEdgeTracker, vpMbKltTracker and vpMbEdgeKltTracker classes that are used in this tutorial. Thus we recommend rather to follow \ref tutorial-tracking-mb-generic.
-
-ViSP allows simultaneously the tracking of a markerless object using the knowledge of its CAD model while providing its 3D localization (i.e., the object pose expressed in the camera frame) when a calibrated camera is used \cite Comport06b. Considered objects should be modeled by lines, circles or cylinders. The CAD model of the object could be defined in vrml format (except for circles), or in cao format.
-
-To follow this tutorial depending on your interest you should be sure that ViSP was build with the following third-parties:
-- \c OpenCV: Useful if you want to investigate the KLT keypoint tracker implemented in vpMbKltTracker or its hybrid version vpMbEdgeKltTracker. We recommend to install \c OpenCV. This 3rd party may be also useful to consider input videos (mpeg, png, jpeg...).
-- \c Ogre \c 3D: This 3rd party is optional and could be difficult to instal on OSX and Windows. To begin with the tracker we don't recommend to install it. \c Ogre \c 3D allows to enable \ref mbdep_settings_visibility_ogre.
-- \c Coin \c 3D: This 3rd party is also optional and difficult to install. That's why we don't recommend to install \c Coin \c 3D to begin with the tracker. \c Coin \c 3D allows only to consider \ref mbdep_advanced_wrml instead of the home-made \ref mbdep_advanced_cao.
-
-Next sections highlight how to use the differents versions of the markerless model-based trackers that are implemented in ViSP.
-
-Note that all the material (source code and video) described in this tutorial is part of ViSP source code and could be downloaded using the following command:
-
-\code
-$ svn export https://github.com/lagadic/visp.git/trunk/tutorial/tracking/model-based/old/generic
-\endcode
+This tutorial can be considered obsolete since ViSP 3.1.0, as we have introduced a generic tracker
+(vpMbGenericTracker) that can replace the vpMbEdgeTracker, vpMbKltTracker and vpMbEdgeKltTracker classes that are used
+in this tutorial. Thus we rather recommend following \ref tutorial-tracking-mb-generic.
+
+ViSP allows the tracking of a markerless object using the knowledge of its CAD model, while simultaneously providing
+its 3D localization (i.e., the object pose expressed in the camera frame) when a calibrated camera is used \cite Comport06b.
+Considered objects should be modeled by lines, circles or cylinders. The CAD model of the object can be defined in
+vrml format (except for circles), or in cao format.
+
+To follow this tutorial, depending on your interest, you should make sure that ViSP was built with the following
+third-parties:
+- \c OpenCV: Useful if you want to investigate the KLT keypoint tracker implemented in vpMbKltTracker or its hybrid
+  version vpMbEdgeKltTracker. We recommend installing \c OpenCV. This 3rd party may also be useful to consider input
+  videos (mpeg, png, jpeg...).
+- \c Ogre \c 3D: This 3rd party is optional and can be difficult to install on OSX and Windows. To begin with the
+  tracker, we don't recommend installing it. \c Ogre \c 3D allows enabling \ref mbdep_settings_visibility_ogre.
+- \c Coin \c 3D: This 3rd party is also optional and difficult to install. That's why we don't recommend installing
+  \c Coin \c 3D to begin with the tracker. \c Coin \c 3D only allows considering \ref mbdep_advanced_wrml instead of
+  the home-made \ref mbdep_advanced_cao.
+
+The next sections highlight how to use the different versions of the markerless model-based trackers that are
+implemented in ViSP.
+
+Note that all the material (source code and videos) described in this tutorial is part of the ViSP source code
+(in the `tutorial/tracking/model-based/old/generic` folder) and can be found at
+https://github.com/lagadic/visp/tree/master/tutorial/tracking/model-based/old/generic.

\section mbdep_started Getting started

In ViSP, depending on the visual features that are used three trackers are available:
-- a tracker implemented in vpMbEdgeTracker that consider moving-edges behind the visible lines of the model. This tracker is appropriate to track texture less objects.
-- an other tracker implemented in vpMbKltTracker that consider KLT keypoints that are detected and tracked on each visible face of the model. This tracker is more designed to track textured objects with edges that are not really visible.
-- an hybrid version implemented in vpMbEdgeKltTracker that is able to consider moving-edges and KLT keypoints. This tracker is appropriate to track textured objects with visible edges.
+- a tracker implemented in vpMbEdgeTracker that considers moving-edges behind the visible lines of the model.
+  This tracker is appropriate to track textureless objects.
+- another tracker, implemented in vpMbKltTracker, that considers KLT keypoints that are detected and tracked on each
+  visible face of the model. This tracker is rather designed to track textured objects with edges that are not really
+  visible.
+- a hybrid version, implemented in vpMbEdgeKltTracker, that is able to consider both moving-edges and KLT keypoints.
+  This tracker is appropriate to track textured objects with visible edges.

\subsection mbdep_started_src Example source code

-The following example that comes from tutorial-mb-tracker.cpp allows to track a tea box modeled in cao format using one of the markerless model-based tracker implemented in ViSP.
+The following example, which comes from tutorial-mb-tracker.cpp, allows tracking a tea box modeled in cao format using
+one of the markerless model-based trackers implemented in ViSP.

\include tutorial-mb-tracker.cpp

-\note An extension of the previous getting started example is proposed in tutorial-mb-tracker-full.cpp where advanced functionalities such as reading tracker settings from an XML file or visibility computation are implemented.
+\note An extension of the previous getting started example is proposed in tutorial-mb-tracker-full.cpp where advanced
+functionalities such as reading tracker settings from an XML file or visibility computation are implemented.

-\note Other tutorials that are specific to a given tracker are provided in tutorial-mb-edge-tracker.cpp, tutorial-mb-klt-tracker.cpp and tutorial-mb-hybrid-tracker.cpp.
+\note Other tutorials that are specific to a given tracker are provided in tutorial-mb-edge-tracker.cpp,
+tutorial-mb-klt-tracker.cpp and tutorial-mb-hybrid-tracker.cpp.

\subsection mbdep_started_input Example input data

The previous example uses the following data as input:
- a video file; "teabox.mpg" is the default video.
-- a cad model that describes the object to track. In our case the file \c "teabox.cao" is the default one. See \ref mbdep_model section to learn how the teabox is modelled and section \ref mbdep_advanced_cao to learn how to model an other object.
-- a file with extension "*.init" that contains the 3D coordinates of some points used to compute an initial pose which serves to initialize the tracker. The user has than to click in the image on the corresponding 2D points. The default file is named `teabox.init`. The content of this file is detailed in \ref mbdep_started_src_explained section.
-- an optional image with extension "*.ppm" that may help the user to remember the location of the corresponding 3D points specified in "*.init" file.
+- a CAD model that describes the object to track. In our case the file \c "teabox.cao" is the default one. See
+  the \ref mbdep_model section to learn how the teabox is modelled and section \ref mbdep_advanced_cao to learn how to
+  model another object.
+- a file with extension "*.init" that contains the 3D coordinates of some points used to compute an initial pose
+  which serves to initialize the tracker. The user then has to click in the image on the corresponding 2D points.
+  The default file is named `teabox.init`. The content of this file is detailed in the \ref mbdep_started_src_explained
+  section.
+- an optional image with extension "*.ppm" that may help the user to remember the location of the corresponding 3D
+  points specified in the "*.init" file.

\subsection mbdep_started_exe Running the example

@@ -55,7 +78,8 @@ Once build, to see the options that are available in the previous source code, j

$ ./tutorial-mb-tracker --help
Usage: ./tutorial-mb-tracker-full [--video