diff --git a/.gitignore b/.gitignore
index 795cd14..909744d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,43 +1,116 @@
-*.pyc
-*.o
-*.so
-*/build/*
-*/setup/*
-*/devel/*
-*/rosbag/*
-*/__pycache__/*
-*/lib/*
-*/bin/*
-*msg_gen
-*srv_gen
-*~
-*build
-*install
-*log
-*rosbag
+# ROS gitignore
+
+log/
+install/
+devel/
+logs/
+build/
+bin/
+lib/
+msg_gen/
+srv_gen/
+msg/*Action.msg
+msg/*ActionFeedback.msg
+msg/*ActionGoal.msg
+msg/*ActionResult.msg
+msg/*Feedback.msg
+msg/*Goal.msg
+msg/*Result.msg
+msg/_*.py
+build_isolated/
+devel_isolated/
+.catkin_tools/
+
+# Generated by dynamic reconfigure
+*.cfgc
+/cfg/cpp/
+/cfg/*.py
+
+# Ignore generated docs
+*.dox
+*.wikidoc
-*.g2o
+# eclipse stuff
+.project
+.cproject
-*rosbag2_2023_08_26-00_14_07
+# QtCreator stuff
+CMakeLists.txt.user
-## data files are not for gitHub
+srv/_*.py
+*.pcd
+*.pyc
+qtcreator-*
+*.user
+
+/planning/cfg
+/planning/docs
+/planning/src
-*.jpg
-*.gif
-*.mp4
-*.bag
-*.db3
-*.db3-shm
-*.db3-wal
+*~
+# Emacs
+.#*
-# undesired folder
+# Catkin custom files
+CATKIN_IGNORE
-*.catkin_tools
-*.vscode
-*.catkin_workspace
-*.json
-*.gv
+# Vscode gitignore
+
+.vscode
+.DS_Store
+.huskyrc.json
+out
+log.log
+**/node_modules
+*.pyc
+*.py.bak
+*.vsix
+**/.vscode/.ropeproject/**
+**/testFiles/**/.cache/**
+*.noseids
+.nyc_output
+.vscode-test
+__pycache__
+npm-debug.log
+**/.mypy_cache/**
+!yarn.lock
+coverage/
+cucumber-report.json
+**/.vscode-test/**
+**/.vscode test/**
+**/.vscode-smoke/**
+**/.venv*/
+port.txt
+precommit.hook
+pythonFiles/lib/**
+debug_coverage*/**
+languageServer/**
+languageServer.*/**
+bin/**
+obj/**
+.pytest_cache
+tmp/**
+.python-version
+.vs/
+test-results*.xml
+xunit-test-results.xml
+build/ci/performance/performance-results.json
+debug*.log
+debugpy*.log
+pydevd*.log
+nodeLanguageServer/**
+nodeLanguageServer.*/**
+dist/**
+# translation files
+*.xlf
+*.nls.*.json
+*.i18n.json
+# Doxygen files
+html/
+latex/
+*.tag
+*.Dockerfile.swp
diff --git a/docker_humble_desktop/Dockerfile b/docker_humble_desktop/Dockerfile
index 4c14c21..eefe524 100644
--- a/docker_humble_desktop/Dockerfile
+++ b/docker_humble_desktop/Dockerfile
@@ -39,6 +39,13 @@ RUN rosdep update \
# Add the source of the project to the .bashrc
RUN echo "if [ -f /home/${USERNAME}/dev_ws/install/setup.bash ]; then source /home/${USERNAME}/dev_ws/install/setup.bash; fi" >> /home/${USERNAME}/.bashrc
+# Install additional Python packages
+RUN pip install opencv-python --no-cache-dir
+RUN pip install depthai --no-cache-dir
+RUN pip install pygame_gui --no-cache-dir
+RUN pip install pygame --no-cache-dir
+RUN pip install transforms3d --no-cache-dir
+
# Clean up
RUN sudo rm -rf /var/lib/apt/lists/*
diff --git a/fake_nav_cs/LICENSE b/fake_nav_cs/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/fake_nav_cs/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/path_planning/.gitkeep b/fake_nav_cs/fake_nav_cs/__init__.py
similarity index 100%
rename from path_planning/.gitkeep
rename to fake_nav_cs/fake_nav_cs/__init__.py
diff --git a/fake_nav_cs/fake_nav_cs/fake_cs_node.py b/fake_nav_cs/fake_nav_cs/fake_cs_node.py
new file mode 100644
index 0000000..facc1b8
--- /dev/null
+++ b/fake_nav_cs/fake_nav_cs/fake_cs_node.py
@@ -0,0 +1,190 @@
+import rclpy
+from rclpy.node import Node
+import numpy as np
+from transforms3d.euler import quat2euler
+from nav_msgs.msg import Path
+from nav_msgs.msg import Odometry
+from custom_msg.msg import Wheelstatus, Motorcmds
+import pygame
+import pygame_gui
+
+
+
+# Node for the Nav fake CS
+class Fake_cs_nav(Node):
+
+ def __init__(self):
+        # ROS 2 node names cannot contain '/', so the namespace is passed separately
+        super().__init__('fake_cs', namespace='Nav')
+ self.position = [0,0,0]
+ self.orientation = [0,0,0]
+ self.linVel = [0,0,0]
+ self.angVel = [0,0,0]
+
+ self.steering_wheel_ang = [0,0,0,0]
+ self.steering_wheel_state = [0,0,0,0]
+ self.driving_wheel_ang = [0,0,0,0]
+ self.driving_wheel_state = [0,0,0,0]
+
+ self.create_subscription(Odometry, '/lio_sam/odom', self.nav_odometry , 10)
+ self.create_subscription(Wheelstatus, '/NAV/absolute_encoders', self.nav_wheel, 10)
+ self.create_subscription(Motorcmds, '/NAV/displacement', self.nav_displacement, 10)
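+        # Note: display_ui() runs a blocking pygame loop, so rclpy.spin() in main()
+        # only starts processing these subscriptions once the UI window is closed.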
+ self.display_ui()
+
+
+    def nav_displacement(self, displacement):
+        self.displacement_mode = displacement.modedeplacement
+        self.info = displacement.info
+
+
+ def nav_wheel(self, msg):
+ """
+ FRONT_LEFT_DRIVE = 0
+ FRONT_RIGHT_DRIVE = 1
+ BACK_RIGHT_DRIVE = 2
+ BACK_LEFT_DRIVE = 3
+ FRONT_LEFT_STEER = 4
+ FRONT_RIGHT_STEER = 5
+ BACK_RIGHT_STEER = 6
+ BACK_LEFT_STEER = 7
+ """
+        print(msg.state)
+        # convert 16-bit absolute encoder counts (0..65535) to degrees
+        self.steering_wheel_ang = [float(i/65536 * 360) for i in msg.data[0:4]]
+        self.driving_wheel_ang = [float(i/65536 * 360) for i in msg.data[4:8]]
+        self.steering_wheel_state = msg.state[0:4]
+        self.driving_wheel_state = msg.state[4:8]
+
+
+    def nav_odometry(self, odometry):
+        self.position = [odometry.pose.pose.position.x, odometry.pose.pose.position.y, odometry.pose.pose.position.z]
+
+        # quat2euler returns (roll, pitch, yaw); index 2 is the yaw angle in radians
+        orientation = quat2euler([odometry.pose.pose.orientation.w,
+                                  odometry.pose.pose.orientation.x,
+                                  odometry.pose.pose.orientation.y,
+                                  odometry.pose.pose.orientation.z])[2]
+        # map the orientation from [-pi, pi] to [0, 2*pi]
+        if orientation < 0:
+            orientation = orientation + 2 * np.pi
+        # convert the orientation from radians to degrees
+        orientation = orientation * 180 / np.pi
+        self.orientation = [0, 0, orientation]
+
+        self.linVel = [odometry.twist.twist.linear.x, odometry.twist.twist.linear.y, odometry.twist.twist.linear.z]
+        self.angVel = [odometry.twist.twist.angular.x, odometry.twist.twist.angular.y, odometry.twist.twist.angular.z]
+
+    def display_ui(self):
+ pygame.init()
+ pygame.display.set_caption('Fake CS Rover Interface')
+ window_surface = pygame.display.set_mode((1024, 768))
+ background = pygame.Surface((1024, 768))
+ background.fill(pygame.Color('#123456'))
+ manager = pygame_gui.UIManager((1024, 768), 'theme.json')
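+        # 'theme.json' is resolved relative to the working directory the node is run from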
+ label_layout_rect = pygame.Rect(30, 20, 100, 20)
+
+ clock = pygame.time.Clock()
+ is_running = True
+
+ # Localization Panel Setup
+ localization_panel = pygame_gui.elements.UIPanel(
+ relative_rect=pygame.Rect(50, 50, 700, 200),
+ manager=manager
+ )
+ loc_label = pygame_gui.elements.UILabel(relative_rect=label_layout_rect, text='Localization:',manager=manager, container = localization_panel)
+
+
+ labels_loc = ['Position', 'Orientation', 'Rover linear speed', 'Rover angular speed']
+ y_offset = 50
+ label_height = 25
+ text_entry_height = 25
+ text_entry_width = 70
+ spacing = 10
+
+ # Generate Labels and Text Entry Fields for Localization
+ for i, label in enumerate(labels_loc):
+ # Calculate y position for label
+ label_y = y_offset + (label_height + spacing) * i
+ label_element = pygame_gui.elements.UILabel(
+ relative_rect=pygame.Rect(10, label_y, 200, label_height),
+ text=label,
+ manager=manager,
+ container=localization_panel
+ )
+
+ # Generate text entry fields for x, y, z components
+ for j in range(3):
+ text_entry_element = pygame_gui.elements.UITextEntryLine(
+ relative_rect=pygame.Rect(220 + j * (text_entry_width + spacing), label_y, text_entry_width, text_entry_height),
+ manager=manager,
+ container=localization_panel
+ )
+ text_entry_element.set_text('0')
+
+ # Wheels Panel Setup
+ wheels_panel = pygame_gui.elements.UIPanel(
+ relative_rect=pygame.Rect(50, 300, 700, 250),
+ manager=manager
+ )
+
+ whls_label = pygame_gui.elements.UILabel(relative_rect=label_layout_rect, text='Wheels:',manager=manager, container = wheels_panel)
+
+
+ wheels_labels = ['Speed', 'Steering', 'Current', 'Motor state']
+ wheels_positions = ['Top-Left', 'Top-Right', 'Bottom-Left', 'Bottom-Right']
+
+ # Generate Labels and Text Entry Fields for Wheels
+ for i, pos in enumerate(wheels_positions):
+ # Calculate y position for wheel label
+ wheel_label_y = y_offset + (label_height + spacing) * i
+ wheel_label_element = pygame_gui.elements.UILabel(
+ relative_rect=pygame.Rect(10, wheel_label_y, 200, label_height),
+ text=pos,
+ manager=manager,
+ container=wheels_panel
+ )
+
+ # Generate text entry fields for each attribute of the wheel
+ for j, attr in enumerate(wheels_labels):
+ # Calculate x position for text entry
+ text_entry_x = 220 + j * (text_entry_width + spacing)
+ text_entry_element = pygame_gui.elements.UITextEntryLine(
+ relative_rect=pygame.Rect(text_entry_x, wheel_label_y, text_entry_width, text_entry_height),
+ manager=manager,
+ container=wheels_panel
+ )
+ text_entry_element.set_text('0')
+
+ # Main event loop
+ while is_running:
+ time_delta = clock.tick(60)/1000.0
+ for event in pygame.event.get():
+ if event.type == pygame.QUIT:
+ is_running = False
+ manager.process_events(event)
+
+ manager.update(time_delta)
+ window_surface.blit(background, (0, 0))
+ manager.draw_ui(window_surface)
+ pygame.display.update()
+
+ pygame.quit()
+
+
+
+
+def main(args=None):
+ rclpy.init(args=args)
+
+ fake_cs_nav = Fake_cs_nav()
+
+ rclpy.spin(fake_cs_nav)
+
+ # Destroy the node explicitly
+ # (optional - otherwise it will be done automatically
+ # when the garbage collector destroys the node object)
+ fake_cs_nav.destroy_node()
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
+
+
diff --git a/fake_nav_cs/package.xml b/fake_nav_cs/package.xml
new file mode 100644
index 0000000..eef0f8b
--- /dev/null
+++ b/fake_nav_cs/package.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>fake_nav_cs</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="xplore@todo.todo">xplore</maintainer>
+  <license>Apache-2.0</license>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/sensors/camera/.gitkeep b/fake_nav_cs/resource/fake_nav_cs
similarity index 100%
rename from sensors/camera/.gitkeep
rename to fake_nav_cs/resource/fake_nav_cs
diff --git a/fake_nav_cs/setup.cfg b/fake_nav_cs/setup.cfg
new file mode 100644
index 0000000..be1e98f
--- /dev/null
+++ b/fake_nav_cs/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/fake_nav_cs
+[install]
+install_scripts=$base/lib/fake_nav_cs
diff --git a/fake_nav_cs/setup.py b/fake_nav_cs/setup.py
new file mode 100644
index 0000000..43d7531
--- /dev/null
+++ b/fake_nav_cs/setup.py
@@ -0,0 +1,25 @@
+from setuptools import find_packages, setup
+
+package_name = 'fake_nav_cs'
+
+setup(
+ name=package_name,
+ version='0.0.0',
+ packages=find_packages(exclude=['test']),
+ data_files=[
+ ('share/ament_index/resource_index/packages',
+ ['resource/' + package_name]),
+ ('share/' + package_name, ['package.xml']),
+ ],
+ install_requires=['setuptools'],
+ zip_safe=True,
+ maintainer='xplore',
+ maintainer_email='xplore@todo.todo',
+ description='TODO: Package description',
+ license='Apache-2.0',
+ tests_require=['pytest'],
+ entry_points={
+        'console_scripts': [
+            'Fake_cs_nav = fake_nav_cs.fake_cs_node:main'
+        ],
+ },
+)
diff --git a/fake_nav_cs/test/test_copyright.py b/fake_nav_cs/test/test_copyright.py
new file mode 100644
index 0000000..97a3919
--- /dev/null
+++ b/fake_nav_cs/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found errors'
diff --git a/fake_nav_cs/test/test_flake8.py b/fake_nav_cs/test/test_flake8.py
new file mode 100644
index 0000000..27ee107
--- /dev/null
+++ b/fake_nav_cs/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+ rc, errors = main_with_errors(argv=[])
+ assert rc == 0, \
+ 'Found %d code style errors / warnings:\n' % len(errors) + \
+ '\n'.join(errors)
diff --git a/fake_nav_cs/test/test_pep257.py b/fake_nav_cs/test/test_pep257.py
new file mode 100644
index 0000000..b234a38
--- /dev/null
+++ b/fake_nav_cs/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found code style errors / warnings'
diff --git a/oakd_test_package/LICENSE b/oakd_test_package/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/oakd_test_package/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/oakd_test_package/oakd_test_package/__init__.py b/oakd_test_package/oakd_test_package/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/oakd_test_package/oakd_test_package/oakd_test copy.py b/oakd_test_package/oakd_test_package/oakd_test copy.py
new file mode 100644
index 0000000..0882ec1
--- /dev/null
+++ b/oakd_test_package/oakd_test_package/oakd_test copy.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+import time
+import math
+
+
+def main():
+ # Create pipeline
+ pipeline = dai.Pipeline()
+
+ # Define sources and outputs
+ imu = pipeline.create(dai.node.IMU)
+ xlinkOut = pipeline.create(dai.node.XLinkOut)
+
+ xlinkOut.setStreamName("imu")
+
+ # enable ACCELEROMETER_RAW at 500 hz rate
+ imu.enableIMUSensor(dai.IMUSensor.ACCELEROMETER_RAW, 500)
+ # enable GYROSCOPE_RAW at 400 hz rate
+ imu.enableIMUSensor(dai.IMUSensor.GYROSCOPE_RAW, 400)
+ # it's recommended to set both setBatchReportThreshold and setMaxBatchReports to 20 when integrating in a pipeline with a lot of input/output connections
+ # above this threshold packets will be sent in batch of X, if the host is not blocked and USB bandwidth is available
+ imu.setBatchReportThreshold(1)
+ # maximum number of IMU packets in a batch, if it's reached device will block sending until host can receive it
+ # if lower or equal to batchReportThreshold then the sending is always blocking on device
+ # useful to reduce device's CPU load and number of lost packets, if CPU load is high on device side due to multiple nodes
+ imu.setMaxBatchReports(10)
+
+ # Link plugins IMU -> XLINK
+ imu.out.link(xlinkOut.input)
+
+ # Pipeline is defined, now we can connect to the device
+ with dai.Device(pipeline) as device:
+
+ def timeDeltaToMilliS(delta) -> float:
+ return delta.total_seconds()*1000
+
+ # Output queue for imu bulk packets
+ imuQueue = device.getOutputQueue(name="imu", maxSize=50, blocking=False)
+ baseTs = None
+ while True:
+ imuData = imuQueue.get() # blocking call, will wait until a new data has arrived
+
+ imuPackets = imuData.packets
+ for imuPacket in imuPackets:
+ acceleroValues = imuPacket.acceleroMeter
+ gyroValues = imuPacket.gyroscope
+
+ acceleroTs = acceleroValues.getTimestampDevice()
+ gyroTs = gyroValues.getTimestampDevice()
+ if baseTs is None:
+ baseTs = acceleroTs if acceleroTs < gyroTs else gyroTs
+ acceleroTs = timeDeltaToMilliS(acceleroTs - baseTs)
+ gyroTs = timeDeltaToMilliS(gyroTs - baseTs)
+
+ imuF = "{:.06f}"
+ tsF = "{:.03f}"
+
+ print(f"Accelerometer timestamp: {tsF.format(acceleroTs)} ms")
+ print(f"Accelerometer [m/s^2]: x: {imuF.format(acceleroValues.x)} y: {imuF.format(acceleroValues.y)} z: {imuF.format(acceleroValues.z)}")
+ print(f"Gyroscope timestamp: {tsF.format(gyroTs)} ms")
+ print(f"Gyroscope [rad/s]: x: {imuF.format(gyroValues.x)} y: {imuF.format(gyroValues.y)} z: {imuF.format(gyroValues.z)} ")
+
+ if cv2.waitKey(1) == ord('q'):
+ break
+
+if __name__ == '__main__':
+ main()
diff --git a/oakd_test_package/oakd_test_package/oakd_test.py b/oakd_test_package/oakd_test_package/oakd_test.py
new file mode 100644
index 0000000..669ef2c
--- /dev/null
+++ b/oakd_test_package/oakd_test_package/oakd_test.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+import time
+import math
+from depthai_sdk import OakCamera
+from depthai_sdk.classes import IMUPacket
+
+def main():
+ with OakCamera() as oak:
+ color = oak.create_camera('color', resolution='1080p', encode='jpeg', fps=30)
+ color.config_color_camera(isp_scale=(2,3))
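+        # isp_scale=(2, 3) scales the 1080p ISP output by 2/3, i.e. to 1280x720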
+ left = oak.create_camera('left', resolution='400p', encode='jpeg',fps=30)
+ right = oak.create_camera('right', resolution='400p', encode='jpeg',fps=30)
+ imu = oak.create_imu()
+ imu.config_imu(report_rate=400, batch_report_threshold=5)
+
+ oak.ros_stream([left, right, color, imu])
+ oak.visualize(left)
+ oak.start(blocking=True)
+
+if __name__ == '__main__':
+ main()
diff --git a/oakd_test_package/package.xml b/oakd_test_package/package.xml
new file mode 100644
index 0000000..880128e
--- /dev/null
+++ b/oakd_test_package/package.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>oakd_test_package</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="xplore@todo.todo">xplore</maintainer>
+  <license>Apache-2.0</license>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/oakd_test_package/resource/oakd_test_package b/oakd_test_package/resource/oakd_test_package
new file mode 100644
index 0000000..e69de29
diff --git a/oakd_test_package/setup.cfg b/oakd_test_package/setup.cfg
new file mode 100644
index 0000000..7a68bf7
--- /dev/null
+++ b/oakd_test_package/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/oakd_test_package
+[install]
+install_scripts=$base/lib/oakd_test_package
diff --git a/oakd_test_package/setup.py b/oakd_test_package/setup.py
new file mode 100644
index 0000000..2febd3a
--- /dev/null
+++ b/oakd_test_package/setup.py
@@ -0,0 +1,26 @@
+from setuptools import find_packages, setup
+
+package_name = 'oakd_test_package'
+
+setup(
+ name=package_name,
+ version='0.0.0',
+ packages=find_packages(exclude=['test']),
+ data_files=[
+ ('share/ament_index/resource_index/packages',
+ ['resource/' + package_name]),
+ ('share/' + package_name, ['package.xml']),
+ ],
+ install_requires=['setuptools'],
+ zip_safe=True,
+ maintainer='xplore',
+ maintainer_email='xplore@todo.todo',
+ description='TODO: Package description',
+ license='Apache-2.0',
+ tests_require=['pytest'],
+ entry_points={
+ 'console_scripts': [
+ 'oakd_test = oakd_test_package.oakd_test:main'
+ ],
+ },
+)
diff --git a/oakd_test_package/test/test_copyright.py b/oakd_test_package/test/test_copyright.py
new file mode 100644
index 0000000..97a3919
--- /dev/null
+++ b/oakd_test_package/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found errors'
diff --git a/oakd_test_package/test/test_flake8.py b/oakd_test_package/test/test_flake8.py
new file mode 100644
index 0000000..27ee107
--- /dev/null
+++ b/oakd_test_package/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+ rc, errors = main_with_errors(argv=[])
+ assert rc == 0, \
+ 'Found %d code style errors / warnings:\n' % len(errors) + \
+ '\n'.join(errors)
diff --git a/oakd_test_package/test/test_pep257.py b/oakd_test_package/test/test_pep257.py
new file mode 100644
index 0000000..b234a38
--- /dev/null
+++ b/oakd_test_package/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found code style errors / warnings'
diff --git a/obstacle_detection/camera/oakd_test/oakd_test/__init__.py b/obstacle_detection/camera/oakd_test/oakd_test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/obstacle_detection/camera/oakd_test/oakd_test/edge_detect_node.py b/obstacle_detection/camera/oakd_test/oakd_test/edge_detect_node.py
new file mode 100644
index 0000000..051990d
--- /dev/null
+++ b/obstacle_detection/camera/oakd_test/oakd_test/edge_detect_node.py
@@ -0,0 +1,102 @@
+import cv2
+import depthai as dai
+
+def main():
+ # Create pipeline
+ pipeline = dai.Pipeline()
+
+ # Define sources and outputs
+ camRgb = pipeline.create(dai.node.ColorCamera)
+ monoLeft = pipeline.create(dai.node.MonoCamera)
+ monoRight = pipeline.create(dai.node.MonoCamera)
+
+ edgeDetectorLeft = pipeline.create(dai.node.EdgeDetector)
+ edgeDetectorRight = pipeline.create(dai.node.EdgeDetector)
+ edgeDetectorRgb = pipeline.create(dai.node.EdgeDetector)
+
+ xoutEdgeLeft = pipeline.create(dai.node.XLinkOut)
+ xoutEdgeRight = pipeline.create(dai.node.XLinkOut)
+ xoutEdgeRgb = pipeline.create(dai.node.XLinkOut)
+ xinEdgeCfg = pipeline.create(dai.node.XLinkIn)
+
+ edgeLeftStr = "edge left"
+ edgeRightStr = "edge right"
+ edgeRgbStr = "edge rgb"
+ edgeCfgStr = "edge cfg"
+
+ xoutEdgeLeft.setStreamName(edgeLeftStr)
+ xoutEdgeRight.setStreamName(edgeRightStr)
+ xoutEdgeRgb.setStreamName(edgeRgbStr)
+ xinEdgeCfg.setStreamName(edgeCfgStr)
+
+ # Properties
+ camRgb.setBoardSocket(dai.CameraBoardSocket.AUTO)
+ camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
+
+ monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+ monoLeft.setBoardSocket(dai.CameraBoardSocket.CAM_B)
+ monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+ monoRight.setBoardSocket(dai.CameraBoardSocket.CAM_C)
+
+ edgeDetectorRgb.setMaxOutputFrameSize(camRgb.getVideoWidth() * camRgb.getVideoHeight())
+
+ # Linking
+ monoLeft.out.link(edgeDetectorLeft.inputImage)
+ monoRight.out.link(edgeDetectorRight.inputImage)
+ camRgb.video.link(edgeDetectorRgb.inputImage)
+
+ edgeDetectorLeft.outputImage.link(xoutEdgeLeft.input)
+ edgeDetectorRight.outputImage.link(xoutEdgeRight.input)
+ edgeDetectorRgb.outputImage.link(xoutEdgeRgb.input)
+
+ xinEdgeCfg.out.link(edgeDetectorLeft.inputConfig)
+ xinEdgeCfg.out.link(edgeDetectorRight.inputConfig)
+ xinEdgeCfg.out.link(edgeDetectorRgb.inputConfig)
+
+ # Connect to device and start pipeline
+ with dai.Device(pipeline) as device:
+
+ # Output/input queues
+ edgeLeftQueue = device.getOutputQueue(edgeLeftStr, 8, False)
+ edgeRightQueue = device.getOutputQueue(edgeRightStr, 8, False)
+ edgeRgbQueue = device.getOutputQueue(edgeRgbStr, 8, False)
+ edgeCfgQueue = device.getInputQueue(edgeCfgStr)
+
+ print("Switch between sobel filter kernels using keys '1' and '2'")
+
+ while(True):
+ edgeLeft = edgeLeftQueue.get() # return a ImgFrame object, blocking call
+ edgeRight = edgeRightQueue.get()
+ edgeRgb = edgeRgbQueue.get()
+
+ edgeLeftFrame = edgeLeft.getFrame() # returns numpy array with shape (height, width, 1)
+ edgeRightFrame = edgeRight.getFrame()
+ edgeRgbFrame = edgeRgb.getFrame()
+
+ # Show the frame
+ cv2.imshow(edgeLeftStr, edgeLeftFrame)
+ cv2.imshow(edgeRightStr, edgeRightFrame)
+ cv2.imshow(edgeRgbStr, edgeRgbFrame)
+
+ key = cv2.waitKey(1)
+ if key == 27:
+ break
+
+ if key == ord('1'):
+ print("Switching sobel filter kernel.")
+ cfg = dai.EdgeDetectorConfig()
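+                # standard 3x3 Sobel kernels (horizontal and vertical gradients)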
+ sobelHorizontalKernel = [[1, 0, -1], [2, 0, -2], [1, 0, -1]]
+ sobelVerticalKernel = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
+ cfg.setSobelFilterKernels(sobelHorizontalKernel, sobelVerticalKernel)
+ edgeCfgQueue.send(cfg)
+
+ if key == ord('2'):
+ print("Switching sobel filter kernel.")
+ cfg = dai.EdgeDetectorConfig()
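+                # heavier-weighted 3x3 kernels (Scharr-like) for a stronger edge response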
+ sobelHorizontalKernel = [[3, 0, -3], [10, 0, -10], [3, 0, -3]]
+ sobelVerticalKernel = [[3, 10, 3], [0, 0, 0], [-3, -10, -3]]
+ cfg.setSobelFilterKernels(sobelHorizontalKernel, sobelVerticalKernel)
+ edgeCfgQueue.send(cfg)
+
+if __name__ == '__main__':
+ main()
diff --git a/obstacle_detection/camera/oakd_test/oakd_test/imu_node.py b/obstacle_detection/camera/oakd_test/oakd_test/imu_node.py
new file mode 100644
index 0000000..21c6435
--- /dev/null
+++ b/obstacle_detection/camera/oakd_test/oakd_test/imu_node.py
@@ -0,0 +1,67 @@
+import cv2
+import depthai as dai
+import time
+import math
+
+def main():
+ device = dai.Device()
+
+ # Create pipeline
+ pipeline = dai.Pipeline()
+
+ # Define sources and outputs
+ imu = pipeline.create(dai.node.IMU)
+ xlinkOut = pipeline.create(dai.node.XLinkOut)
+
+ xlinkOut.setStreamName("imu")
+
+ # enable ROTATION_VECTOR at 400 hz rate
+ imu.enableIMUSensor(dai.IMUSensor.ROTATION_VECTOR, 400)
+ # it's recommended to set both setBatchReportThreshold and setMaxBatchReports to 20 when integrating in a pipeline with a lot of input/output connections
+ # above this threshold packets will be sent in batch of X, if the host is not blocked and USB bandwidth is available
+ imu.setBatchReportThreshold(1)
+ # maximum number of IMU packets in a batch, if it's reached device will block sending until host can receive it
+ # if lower or equal to batchReportThreshold then the sending is always blocking on device
+ # useful to reduce device's CPU load and number of lost packets, if CPU load is high on device side due to multiple nodes
+ imu.setMaxBatchReports(10)
+
+ # Link plugins IMU -> XLINK
+ imu.out.link(xlinkOut.input)
+
+ print(pipeline.getConnectionMap())
+ # Pipeline is defined, now we can connect to the device
+ with device:
+ device.startPipeline(pipeline)
+
+ def timeDeltaToMilliS(delta) -> float:
+ return delta.total_seconds()*1000
+
+ # Output queue for imu bulk packets
+ imuQueue = device.getOutputQueue(name="imu", maxSize=50, blocking=False)
+ baseTs = None
+ while True:
+ imuData = imuQueue.get() # blocking call, will wait until a new data has arrived
+
+ imuPackets = imuData.packets
+ for imuPacket in imuPackets:
+ rVvalues = imuPacket.rotationVector
+
+ rvTs = rVvalues.getTimestampDevice()
+ if baseTs is None:
+ baseTs = rvTs
+ rvTs = rvTs - baseTs
+
+ imuF = "{:.06f}"
+ tsF = "{:.03f}"
+
+ print(f"Rotation vector timestamp: {tsF.format(timeDeltaToMilliS(rvTs))} ms")
+ print(f"Quaternion: i: {imuF.format(rVvalues.i)} j: {imuF.format(rVvalues.j)} "
+ f"k: {imuF.format(rVvalues.k)} real: {imuF.format(rVvalues.real)}")
+ print(f"Accuracy (rad): {imuF.format(rVvalues.rotationVectorAccuracy)}")
+
+
+ if cv2.waitKey(1) == ord('q'):
+ break
+
+if __name__ == '__main__':
+ main()
diff --git a/obstacle_detection/camera/oakd_test/package.xml b/obstacle_detection/camera/oakd_test/package.xml
new file mode 100644
index 0000000..112d883
--- /dev/null
+++ b/obstacle_detection/camera/oakd_test/package.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>oakd_test</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="xplore@todo.todo">xplore</maintainer>
+  <license>TODO: License declaration</license>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <depend>depthai</depend>
+  <depend>cv2</depend>
+
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/obstacle_detection/camera/oakd_test/resource/oakd_test b/obstacle_detection/camera/oakd_test/resource/oakd_test
new file mode 100644
index 0000000..e69de29
diff --git a/obstacle_detection/camera/oakd_test/setup.cfg b/obstacle_detection/camera/oakd_test/setup.cfg
new file mode 100644
index 0000000..8c0ad1c
--- /dev/null
+++ b/obstacle_detection/camera/oakd_test/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/oakd_test
+[install]
+install_scripts=$base/lib/oakd_test
diff --git a/obstacle_detection/camera/oakd_test/setup.py b/obstacle_detection/camera/oakd_test/setup.py
new file mode 100644
index 0000000..97dd99f
--- /dev/null
+++ b/obstacle_detection/camera/oakd_test/setup.py
@@ -0,0 +1,27 @@
+from setuptools import find_packages, setup
+
+package_name = 'oakd_test'
+
+setup(
+ name=package_name,
+ version='0.0.0',
+ packages=find_packages(exclude=['test']),
+ data_files=[
+ ('share/ament_index/resource_index/packages',
+ ['resource/' + package_name]),
+ ('share/' + package_name, ['package.xml']),
+ ],
+ install_requires=['setuptools'],
+ zip_safe=True,
+ maintainer='xplore',
+ maintainer_email='xplore@todo.todo',
+ description='TODO: Package description',
+ license='TODO: License declaration',
+ tests_require=['pytest'],
+ entry_points={
+ 'console_scripts': [
+ 'imu_node = oakd_test.imu_node:main',
+ 'edge_detect_node = oakd_test.edge_detect_node:main',
+ ],
+ },
+)
diff --git a/obstacle_detection/camera/oakd_test/test/test_copyright.py b/obstacle_detection/camera/oakd_test/test/test_copyright.py
new file mode 100644
index 0000000..97a3919
--- /dev/null
+++ b/obstacle_detection/camera/oakd_test/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found errors'
diff --git a/obstacle_detection/camera/oakd_test/test/test_flake8.py b/obstacle_detection/camera/oakd_test/test/test_flake8.py
new file mode 100644
index 0000000..27ee107
--- /dev/null
+++ b/obstacle_detection/camera/oakd_test/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+ rc, errors = main_with_errors(argv=[])
+ assert rc == 0, \
+ 'Found %d code style errors / warnings:\n' % len(errors) + \
+ '\n'.join(errors)
diff --git a/obstacle_detection/camera/oakd_test/test/test_pep257.py b/obstacle_detection/camera/oakd_test/test/test_pep257.py
new file mode 100644
index 0000000..b234a38
--- /dev/null
+++ b/obstacle_detection/camera/oakd_test/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found code style errors / warnings'
diff --git a/obstacle_detection/camera/obstacle_filter/launch/obstacle_filter.launch.py b/obstacle_detection/camera/obstacle_filter/launch/obstacle_filter.launch.py
new file mode 100644
index 0000000..caaf201
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/launch/obstacle_filter.launch.py
@@ -0,0 +1,31 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+def generate_launch_description():
+ return LaunchDescription([
+ Node(
+ package='obstacle_filter',
+ executable='obstacle_filter_node',
+ name='NAV_obstacle_filter_node',
+ output='screen',
+ emulate_tty=True,
+ parameters=[
+ {'gridshape_y': 8, # total 144
+ 'gridshape_x': 8, # total 256
+ 'max_depth': 3000,
+ 'min_depth': 800,
+ 'num_iter': 50,
+ 'thresh_dist': 20,
+ 'num_inliers': 360,
+ 'camera_angle': 17,
+ 'obstacle_angle': 45,
+ 'how_to_replace': 'random',
+ 'filter': 'ransac',
+ 'device': 'cpu'
+ }
+ ]
+ )
+ ])
+
+
+
diff --git a/obstacle_detection/camera/obstacle_filter/launch/slope_filter.launch.py b/obstacle_detection/camera/obstacle_filter/launch/slope_filter.launch.py
new file mode 100644
index 0000000..63d8b24
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/launch/slope_filter.launch.py
@@ -0,0 +1,30 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
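+# Note: max_height, max_dist and thresh_dist are presumably in millimetres and the
+# angles in degrees, consistent with the obstacle_filter defaults.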
+def generate_launch_description():
+ return LaunchDescription([
+ Node(
+ package='obstacle_filter',
+ executable='slope_filter_node',
+ name='slope_filter',
+ output='screen',
+ emulate_tty=True,
+ parameters=[
+ {'gridshape_y': 4,
+ 'gridshape_x': 8,
+ 'max_height': 2000,
+ 'max_dist': 6000,
+ 'num_iter': 50,
+ 'thresh_dist': 200,
+ 'num_inliers': 200,
+ 'obstacle_angle': 45,
+ 'how_to_replace': 'random',
+ 'filter': 'ransac',
+ 'device': 'cpu'
+ }
+ ]
+ )
+ ])
+
+
+
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/__init__.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/__init__.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/filter.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/filter.py
new file mode 100644
index 0000000..212a9f6
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/filter.py
@@ -0,0 +1,261 @@
+import torch
+import numpy as np
+# import matplotlib.pyplot as plt
+# from mpl_toolkits import mplot3d
+
+import time
+
+def convertToPointCloud(array, f_x, c_x, c_y):
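+ # Back-project a depth image into a point cloud with the pinhole model:
+ # for a pixel (u, v) with depth z, x = (u - c_x) * z / f_x and y = (v - c_y) * z / f_x
+ # (a single focal length is used, i.e. square pixels with f_y ~= f_x are assumed).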
+ height, width = array.shape
+ device = array.device
+ x = torch.arange(0, width, device=device)
+ y = torch.arange(0, height, device=device)
+
+ x, y = torch.meshgrid(x, y, indexing='xy')
+
+ x = x - c_x
+ y = y - c_y
+
+ x = x * array / f_x
+ y = y * array / f_x
+
+ return x, y, array
+
+
+def lstsq(array_, device='cpu'):
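+ # Fit z = a*x + b*y + c to every grid cell in one batched least-squares call;
+ # the returned coeff tensor holds (a, b, c) per cell.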
+ ### gpu code
+ # load the points to the gpu
+ # size grid_x x grid_y x (height * width) x 3
+
+ array_ = array_.to(device) # 3 x grid_x x grid_y x (height * width)
+ array_ = array_.permute(1, 2, 3, 0) # grid_x x grid_y x (height * width) x 3
+
+ valid = array_[:, :, :, 2]>10
+ summed = torch.sum(valid, dim = 2)
+
+ print('grid element : ', summed)
+
+ # matrix A (x, y, 1)
+ A = torch.cat([array_[:, :, :, 0:1],
+ array_[:, :, :, 1:2],
+ torch.ones_like(array_[:, :, :, 0:1])], dim = 3) # grid_x x grid_y x (height * width) x 3
+ b = array_[:, :, :, 2] # grid_x x grid_y x (height * width)
+
+ coeff, _, _, _ = torch.linalg.lstsq(A, b) # grid_x x grid_y x 3
+
+ # coeffs = torch.cat([coeff[:, :, 0:1], coeff[:, :, 1:2], -1 * torch.ones_like(coeff[:, :, 0:1]), coeff[:, :, 2:3]], dim=2)
+
+ return coeff.cpu()
+
+# Vectorized RANSAC implementation
+def ransac(array, num_iterations, threshold_distance, num_inliers, num_random_pts=3, device='cpu'):
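+ # For every grid cell and every iteration: sample num_random_pts points, fit the plane
+ # z = a*x + b*y + c with a batched least-squares solve, rewrite it as a*x + b*y - z + c = 0,
+ # measure point-to-plane distances, count inliers, and keep the candidate plane with the
+ # most inliers for each cell.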
+ array = array.to(device) # 3 x grid_x x grid_y x (height * width)
+ array = array.permute(1, 2, 3, 0) # grid_x x grid_y x (height * width) x 3
+
+ # for each subgrid element we want to select num_random_pts random points per iteration
+ num_pts = array.shape[-2]
+
+ random_indices = torch.randint(0, num_pts, \
+ (num_iterations, num_random_pts)) # shape num_iterations, num_random_pts
+
+ random_points = array[:, :, random_indices, :] # shape grid_x x grid_y x num_iter x num_random_pts x 3
+
+ # A (x, y, 1)
+ A = torch.cat([random_points[:, :, :, :, 0:1], \
+ random_points[:, :, :, :, 1:2], \
+ torch.ones_like(random_points[:, :, :, :, 2:3])], dim = 4) # grid_x x grid_y x num_iter x num_random_pts x 3
+ # b (z)
+ b = random_points[:, :, :, :, 2]
+
+ coeff, _, _, _ = torch.linalg.lstsq(A, b) # grid_x x grid_y x num_iter x 3
+
+ coeffs = torch.stack([coeff[:, :, :, 0], \
+ coeff[:, :, :, 1], \
+ -1 * torch.ones_like(coeff[:, :, :, 2]), coeff[:, :, :, 2]], dim=3) # grid_x x grid_y x num_iter x 4
+
+ # (grid_x x grid_y x (height * width) x 3) x (grid_x x grid_y x 3 x num_iter) + (grid_x x grid_y x 1 x num_iter)
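+ # point-to-plane distance |a*x + b*y - z + c| / ||(a, b, -1)|| evaluated for every point
+ # against every candidate plane at once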
+ distances = torch.abs(array.matmul(coeffs[:, :, :, :3].permute(0, 1, 3, 2)) + \
+ coeffs[:, :, :, 3:4].permute(0, 1, 3, 2)) / \
+ torch.linalg.norm(coeffs[:, :, :, :3], dim = 3, keepdim=True).permute(0, 1, 3, 2) #grid_x x grid_y x (height * width) x num_iter
+
+ # Count the number of inliers
+ inliers = torch.sum(distances < threshold_distance, dim=2) # grid_x x grid_y x num_iter
+
+ valid_planes = torch.zeros((inliers.shape[0], inliers.shape[1]), dtype=bool) # grid_x x grid_y
+
+ maxed_inliers = torch.max(inliers, dim = 2) # grid_x x grid_y
+ valid_planes = maxed_inliers.values > num_inliers # grid_x x grid_y
+
+ # choose the best plane
+ best_plane_ids = maxed_inliers.indices # grid_x x grid_y
+
+ best_planes = torch.zeros((array.shape[0], array.shape[1], 4)).to(device) # grid_x x grid_y x 4
+
+ for i in range(array.shape[0]):
+ for j in range(array.shape[1]):
+ best_planes[i, j] = coeffs[i, j, best_plane_ids[i, j], :]
+
+ # could I do this without the for loops ?
+
+ return best_planes.cpu()
+
+def getPC(depth_array, gridshape = (8, 8), \
+ max_depth = 3000, min_depth = 100, num_iter = 20, thresh_dist = 20, num_inliers = 200, \
+ how_to_replace = 'random', filter = 'ransac', device = 'cpu', \
+ camera_angle = 17, obstacle_angle = 45):
+
+ # t1 = time.time()
+
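+ # reference ground-normal direction expressed in the camera frame, derived from
+ # camera_angle (the camera's tilt in degrees); fitted plane normals are compared to it below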
+ ground_vector = torch.tensor([0, np.cos(camera_angle/180*torch.pi), np.sin(camera_angle/180*torch.pi)])
+
+ # intrinsic camera parameters (hardcoded for now)
+ if depth_array.shape[0] == 720: # 720p
+ f_x = 798.31
+
+ c_x = 655.73
+ c_y = 313.12
+
+ elif depth_array.shape[0] == 400: # 400p
+ f_x = 399.15
+
+ c_x = 327.86
+ c_y = 176.55
+
+ # before 1280 720
+ # after 256 144
+
+ # skip points to speed up computation (downsample the image)
+ skip_points = 5
+ depth_array = depth_array[::skip_points, ::skip_points]
+
+ depth_array = torch.from_numpy(depth_array.astype(np.float32))
+
+ # for recentering image before converting to point cloud
+ f_x = f_x / skip_points # focal length in x direction
+ c_x = c_x / skip_points # principal point in x direction
+ c_y = c_y / skip_points # principal point in y direction
+
+ x, y, z = convertToPointCloud(depth_array, f_x, c_x, c_y)
+ pc = torch.stack((x, y, z), axis=0) # 3 x height x width
+
+ # reshape the pointcloud image into a grid of subimages
+ grid_height = (depth_array.shape[0] // gridshape[0])
+ grid_width = (depth_array.shape[1] // gridshape[1])
+
+ subgrid_size = (grid_height, grid_width)
+
+ subimages = torch.split(pc, subgrid_size[0], dim=1)
+ subimages = [torch.split(subimage, subgrid_size[1], dim=2) for subimage in subimages]
+
+ array = torch.stack([torch.stack(subimage_row, dim=1) for subimage_row in subimages], dim=1)
+ array = array.reshape(3, gridshape[0], gridshape[1], grid_height * grid_width)
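+ # array now has shape (3, grid_rows, grid_cols, pts_per_cell); each cell is filtered independently below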
+
+ print(filter)
+
+ # filter 0 : don't do the filtering
+ if filter == 'filter_off':
+ print('off')
+ array = array.reshape(3, -1)
+ array = array.transpose(dim0=0, dim1=1)
+ return torch.empty((0, 3)), array
+
+ orig_array = array
+ # depth_vals = array[2, :, :, :]
+
+ # filter 1 : points too far or too close
+ array = array * (array[2, :, :, :] < max_depth) * (array[2, :, :, :] > min_depth)
+
+ # filter 2 : compute ratio of valid points to all points,
+ # if the ratio is too low, discard the subgrid (there are no valid points in that area of the image: the camera could not estimate depth there)
+ valid_subgrids = torch.zeros((gridshape[0], gridshape[1]), dtype=bool) # grid_x x grid_y
+
+ num_pts_per_subgrid = grid_height * grid_width
+ sums = torch.zeros((gridshape[0], gridshape[1]))
+ sums = torch.sum(array[2, :, :, :] > 0, dim=2)
+
+ # valid_pts_mask = (depth_vals > 0) # grid_x x grid_y x nb_pts
+
+ ratio = 0.4 # minimum ratio of valid points to all points
+ for i in range(gridshape[0]):
+ for j in range(gridshape[1]):
+ valid_pts_mask = array[2, i, j, :] > 0
+
+ # if number of valid points is greater than ratio
+ if sums[i,j] > num_pts_per_subgrid * ratio:
+ # set subgrid to be valid
+ valid_subgrids[i, j] = 1
+
+ # get the valid points
+ valid_pts = array[:, i, j, valid_pts_mask] # 3 x num_valid_pts
+
+ if how_to_replace == 'median':
+ median, indices = torch.median(valid_pts, dim=1, keepdim=True) #
+ array[:, i, j, ~valid_pts_mask] = median
+ elif how_to_replace == 'mean':
+ # set invalid points to the mean of the valid points
+ mean = torch.mean(valid_pts, axis=1, keepdim=True)
+ array[:, i, j, ~valid_pts_mask] = mean
+ elif how_to_replace == 'random':
+ # set the invalid points to be randomly one of the valid points
+ random_valid_pts = torch.randint(0, valid_pts.shape[1], (torch.sum(~valid_pts_mask),))
+ array[:, i, j, ~valid_pts_mask] = valid_pts[:, random_valid_pts]
+ else:
+ raise ValueError('how_to_replace must be one of median, mean, random')
+ else:
+ # ratio is not satisfied, so set all points to be random. This is so the lstsq function works on CUDA
+ random_values = torch.normal(mean = 1000, std = 100, size=(array.shape[0], array.shape[-1]))
+ array[:, i, j, :] = random_values
+
+ # t2 = time.time()
+
+ # print("Time for preprocessing in filter: ", t2 - t1)
+
+ coeff = torch.zeros((gridshape[0], gridshape[1], 4))
+
+ if filter == 'ransac':
+ coeff = ransac(array, num_iter, thresh_dist, num_inliers, num_random_pts=3, device=device) # grid_x x grid_y x 4
+ elif filter == 'lstsq':
+ coeff = lstsq(array, device=device) # grid_x x grid_y x 3
+
+ plane_vectors = torch.stack([coeff[:, :, 0], coeff[:, :, 1], -1*torch.ones_like(coeff[:, :, 1])], dim=2)
+
+ plane_vectors = plane_vectors / torch.linalg.norm(plane_vectors, dim=2, keepdim=True) # grid_x x grid_y x 3
+
+ # compute scalar product between plane and gravity vector
+ scalar_products = torch.sum(plane_vectors * ground_vector, dim=2, keepdim=False) # grid_x x grid_y
+
+ # compute angle between plane and gravity vector
+ angles = torch.arccos(torch.abs(scalar_products)) * 180 / torch.pi # grid_x x grid_y
+
+ # only consider angles which are valid
+ angles = angles * valid_subgrids
+ # print('Angles: ', angles)
+
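+ # cells whose fitted plane deviates from the ground normal by more than obstacle_angle
+ # degrees are kept as obstacles; the rest are returned separately as non-obstacle points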
+ valid_subgrids = (angles > obstacle_angle) # grid_x x grid_y
+
+ array = array[:, valid_subgrids, :]
+ not_array = orig_array[:, ~valid_subgrids, :]
+
+ array = array.reshape(3, -1)
+ array = array.transpose(dim0=0, dim1=1)
+
+ not_array = not_array.reshape(3, -1)
+ not_array = not_array.transpose(dim0=0, dim1=1)
+
+ return array, not_array
+
+ # # filter 3: compute plane going through each set of points, if plane is off from vertical, then set as obstacle
+
+ # # compute the planes for each subgrid
+ # coeff = lstsq(array, device=device) # grid_x x grid_y x 4
+
+ # z_new = array[ 0, :, :, :] * coeff[:, :, 0:1] + array[1, :, :, :] * coeff[:, :, 1:2] + coeff[:, :, 2:3]
+
+ # array[2, :, :, :] = z_new
+
+ # array = array.reshape(3, -1)
+ # array = array.transpose(dim0=0, dim1=1)
+
+ # return array
\ No newline at end of file
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/lstsq_filter.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/lstsq_filter.py
new file mode 100644
index 0000000..cc77707
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/lstsq_filter.py
@@ -0,0 +1,245 @@
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits import mplot3d
+
+
+def convertToPointCloud(array, f_x, c_x, c_y):
+ height, width = array.shape
+ x = torch.arange(0, width)
+ y = torch.arange(0, height)
+
+ #print(height, width)
+
+ x, y = torch.meshgrid(x, y, indexing='xy')
+ #print(x, y)
+ x = x - c_x
+ y = y - c_y
+
+ x = x * array / f_x
+ y = y * array / f_x
+
+ return x, y, array
+
+
+def lstsq(array, device='cpu'):
+ ### gpu code
+ # load the points to the gpu
+ # size grid_x x grid_y x (height * width) x 3
+ # array = torch.from_numpy(array).to('cpu')
+
+ array = array.to(device) # 3 x grid_x x grid_y x (height * width)
+ array = array.permute(1, 2, 3, 0) # grid_x x grid_y x (height * width) x 3
+
+ print('grid element : ', torch.sum(array[:, :, :, 2]>10, dim=2))
+
+ # fig = plt.figure()
+ # ax = fig.add_subplot(111, projection='3d')
+
+ # for i in range(array.shape[0]):
+ # for j in range(array.shape[1]):
+ # ax.scatter(array[i, j, :, 0], array[i, j, :, 1], array[i, j, :, 2])
+
+ # plt.show(block=True)
+
+ A = torch.cat([array[:, :, :, 0:1] * 0 + 1, array[:, :, :, 0:1], array[:, :, :, 2:3]], dim = 3) # grid_x x grid_y x (height * width) x 3
+ b = array[:, :, :, 1] # grid_x x grid_y x (height * width)
+
+ coeff, _, _, _ = torch.linalg.lstsq(A, b) # grid_x x grid_y x 3
+
+ coeffs = torch.stack([coeff[:, :, 1], -1*torch.ones_like(coeff[:, :, 1]), \
+ coeff[:, :, 2], coeff[:, :, 0]], dim=2) # grid_x x grid_y x 4
+
+ valid_planes = torch.ones((coeffs.shape[0], coeffs.shape[1]), dtype=bool) # grid_x x grid_y
+
+ return coeffs, valid_planes
+
+ """
+ # for each subgrid element we want to select num_random_pts random
+ num_pts = array.shape[-2]
+
+ random_indices = torch.randint(0, num_pts, \
+ (num_iterations, num_random_pts)) # shape num_iterations, num_random_pts
+
+ random_points = array[:, :, random_indices, :] # shape grid_x x grid_y x num_iter x num_random_pts x 3
+
+ A = torch.cat([random_points[:, :, :, :, 0:1] * 0 + 1, random_points[:, :, :, :, 0:1], \
+ random_points[:, :, :, :, 2:3]], dim = 4) # grid_x x grid_y x num_iter x num_random_pts x 3
+
+ b = random_points[:, :, :, :, 1] # grid_x x grid_y x num_iter x num_random_pts
+ print(A.shape)
+ print(b.shape)
+
+ coeff, _, _, _ = torch.linalg.lstsq(A, b) # grid_x x grid_y x num_iter x 3
+ # print(coeff.shape)
+
+ coeffs = torch.stack([coeff[:, :, :, 1], -1*torch.ones_like(coeff[:, :, :, 1]), \
+ coeff[:, :, :, 2], coeff[:, :, :, 0]], dim=3) # grid_x x grid_y x num_iter x 4
+
+ # Compute the distance between each point and the plane
+
+ # (grid_x x grid_y x (height * width) x 3) x (grid_x x grid_y x 3 x num_iter) + (grid_x x grid_y x 1 x num_iter)
+ distances = torch.abs(array.matmul(coeffs[:, :, :, :3].permute(0, 1, 3, 2)) + \
+ coeffs[:, :, :, 3:4].permute(0, 1, 3, 2)) / \
+ torch.linalg.norm(coeffs[:, :, :, :3], dim = 3, keepdim=True).permute(0, 1, 3, 2) #grid_x x grid_y x (height * width) x num_iter
+
+
+ # Count the number of inliers
+ inliers = torch.sum(distances < threshold_distance, dim=2) # grid_x x grid_y x num_iter
+
+ valid_planes = torch.zeros((inliers.shape[0], inliers.shape[1]), dtype=bool) # grid_x x grid_y
+
+ maxed_inliers = torch.max(inliers, dim = 2) # grid_x x grid_y
+ valid_planes = maxed_inliers.values > num_inliers # grid_x x grid_y
+
+ # choose the best plane
+ best_plane_ids = maxed_inliers.indices # grid_x x grid_y
+
+ best_planes = torch.zeros((array.shape[0], array.shape[1], 4)).to(device) # grid_x x grid_y x 4
+
+ for i in range(array.shape[0]):
+ for j in range(array.shape[1]):
+ best_planes[i, j] = coeffs[i, j, best_plane_ids[i, j], :]
+
+ # could I do this without the for loops ?
+
+ return best_planes, valid_planes """
+
+
+def getPC_lstsq(depth_array, gridshape = (8, 8), \
+ max_depth = 3000, min_depth = 100, how_to_replace = 'random', filter = True, device = 'cpu', \
+ ground_vector = torch.tensor([0, np.cos(0.35), -np.sin(0.35)])):
+ # before 1280 720
+ # after 256 144
+ skip_points = 5
+ depth_array = depth_array[::skip_points, ::skip_points]
+ # conf_array = conf_array[::skip_points, ::skip_points]
+
+ depth_array = torch.from_numpy(depth_array.astype(np.float32))
+ # conf_array = torch.from_numpy(conf_array.astype(np.uint8))
+
+ f_x = 798.31
+ # f_y = f_x
+
+ c_x = 655.73
+ c_y = 313.12
+
+ f_x = f_x / skip_points
+ c_x = c_x / skip_points
+ c_y = c_y / skip_points
+
+ x, y, z = convertToPointCloud(depth_array, f_x, c_x, c_y)
+ pc = torch.stack((x, y, z), axis=0) # 3 x height x width
+
+ # reshape the pointcloud image into a grid of subimages
+ grid_height = (depth_array.shape[0] // gridshape[0])
+ grid_width = (depth_array.shape[1] // gridshape[1])
+
+ subgrid_size = (grid_height, grid_width)
+
+ subimages = torch.split(pc, subgrid_size[0], dim=1)
+ subimages = [torch.split(subimage, subgrid_size[1], dim=2) for subimage in subimages]
+
+ array = torch.stack([torch.stack(subimage_row, dim=1) for subimage_row in subimages], dim=1)
+ array = array.reshape(3, gridshape[0], gridshape[1], grid_height * grid_width)
+
+ # # reshape the confidence array into a grid of subimages
+ # subconf = torch.split(conf_array, subgrid_size[0], dim=0)
+ # subconf = [torch.split(subconf_row, subgrid_size[1], dim=1) for subconf_row in subconf]
+
+ # conf_array = torch.stack([torch.stack(subconf_row, dim=1) for subconf_row in subconf], dim=1)
+ # conf_array = conf_array.reshape(gridshape[0], gridshape[1], grid_height * grid_width)
+
+ # filter 1 : points too far or too close
+ array = array * (array[2, :, :, :] < max_depth) * (array[2, :, :, :] > min_depth)
+
+ # # filter 2 : points we are not confident in
+ # array = array * (conf_array[:, :, :] < threshold)
+
+ if filter == False:
+ return array
+
+ # filter 3 : compute ratio of valid points to all points,
+ # if ratio is too low, then discard the subgrid
+ valid_subgrids = torch.zeros((gridshape[0], gridshape[1]), dtype=bool) # grid_x x grid_y
+
+ num_pts_per_subgrid = grid_height * grid_width
+ sums = torch.zeros((gridshape[0], gridshape[1]))
+ sums = torch.sum(array[2, :, :, :] > 0, dim=2)
+
+ print("valid points sums", sums, num_pts_per_subgrid)
+
+ ratio = 0.5
+ for i in range(gridshape[0]):
+ for j in range(gridshape[1]):
+ valid_pts_mask = (array[2, i, j, :] > 0)
+ # sum_ = torch.sum(valid_pts_mask)
+ # sums[i, j] = sum_
+
+ # if number of valid points is greater than ratio
+ if sums[i,j] > num_pts_per_subgrid * ratio:
+ # set subgrid to be valid
+ valid_subgrids[i, j] = 1
+
+ # get the valid points
+ valid_pts = array[:, i, j, valid_pts_mask] # 3 x num_valid_pts
+
+ if how_to_replace == 'gaussian':
+ # set the invalid points to be distributed according to normal dist defined by the valid points
+ # compute mean and cov of valid points
+ mean = torch.mean(valid_pts, axis=1)
+ cov = torch.cov(valid_pts)
+ distr = torch.distributions.multivariate_normal.MultivariateNormal(mean, cov)
+ array[:, i, j, ~valid_pts_mask] = distr.sample((torch.sum(~valid_pts_mask),)).T
+ elif how_to_replace == 'mean':
+ # set invalid points to the mean of the valid points
+ mean = torch.mean(valid_pts, axis=1, keepdim=True)
+ array[:, i, j, ~valid_pts_mask] = mean
+ elif how_to_replace == 'random':
+ # set the invalid points to be randomly one of the valid points
+ random_valid_pts = torch.randint(0, valid_pts.shape[1], (torch.sum(~valid_pts_mask),))
+ array[:, i, j, ~valid_pts_mask] = valid_pts[:, random_valid_pts]
+ else:
+ raise ValueError('how_to_replace must be one of gaussian, mean, random')
+ pass
+ else:
+ # ratio is not satisfied so set all points to zero
+ array[:, i, j, :] = 0
+
+ # filter 4 : if the plane is less than 45 degrees from horizontal, then discard the subgrid
+
+ # compute the planes for each subgrid
+ best_planes, valid_planes = lstsq(array, device=device) # grid_x x grid_y x 4
+
+ # print(best_planes)
+ plane_vectors = best_planes[:, :, :3]
+ plane_vectors = plane_vectors / torch.linalg.norm(plane_vectors, dim=2, keepdim=True) # grid_x x grid_y x 3
+
+ # compute scalar product between plane and gravity vector
+ scalar_products = torch.sum(plane_vectors * ground_vector, dim=2, keepdim=False) # grid_x x grid_y
+
+ # compute angle between plane and gravity vector
+ angles = torch.arccos(torch.abs(scalar_products)) * 180 / torch.pi # grid_x x grid_y
+ angles = angles * valid_subgrids
+ print(angles)
+
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+
+ f = array[:, angles > 0, :]
+
+ for i in range(f.shape[1]):
+ ax.scatter(f[0, i, :], f[1, i, :], f[2, i, :])
+
+ plt.show(block=True)
+
+ valid_subgrids = (angles > 45) # grid_x x grid_y
+
+
+ filtered_points = array[:, valid_subgrids, :] # 3 x num_valid_subgrids x num_pts_per_subgrid
+
+ filtered_points = filtered_points.reshape(3, -1) # 3 x (num_valid_subgrids * num_pts_per_subgrid)
+ filtered_points = filtered_points.T # (num_valid_subgrids * num_pts_per_subgrid) x 3
+
+ # filtered_points = filtered_points[filtered_points[:, 2] > 0, :] # remove points with z = 0
+ return filtered_points
\ No newline at end of file
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter.py
new file mode 100644
index 0000000..2e11577
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter.py
@@ -0,0 +1,216 @@
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits import mplot3d
+
+def convertToPointCloud(array, f_x, c_x, c_y):
+ height, width = array.shape
+ x = torch.arange(0, width)
+ y = torch.arange(0, height)
+
+ x, y = torch.meshgrid(x, y, indexing='xy')
+
+ x = x - c_x
+ y = y - c_y
+
+ x = x * array / f_x
+ y = y * array / f_x
+
+ return x, y, array
+
+def ransac(array, num_iterations, threshold_distance, num_inliers, num_random_pts=3, device='cpu'):
+
+
+ array = array.to(device) # 3 x grid_x x grid_y x (height * width)
+ array = array.permute(1, 2, 3, 0) # grid_x x grid_y x (height * width) x 3
+
+ # for each subgrid element we want to select num_random_pts random points per iteration
+ num_pts = array.shape[-2]
+
+ random_indices = torch.randint(0, num_pts, \
+ (num_iterations, num_random_pts)) # shape num_iterations, num_random_pts
+
+ random_points = array[:, :, random_indices, :] # shape grid_x x grid_y x num_iter x num_random_pts x 3
+
+ # A (x, y, 1)
+ A = torch.cat([random_points[:, :, :, :, 0:1], \
+ random_points[:, :, :, :, 1:2], \
+ torch.ones_like(random_points[:, :, :, :, 2:3])], dim = 4) # grid_x x grid_y x num_iter x num_random_pts x 3
+ # b (z)
+ b = random_points[:, :, :, :, 2]
+
+ coeff, _, _, _ = torch.linalg.lstsq(A, b) # grid_x x grid_y x num_iter x 3
+
+ coeffs = torch.stack([coeff[:, :, :, 0], \
+ coeff[:, :, :, 1], \
+ -1 * torch.ones_like(coeff[:, :, :, 2]), coeff[:, :, :, 2]], dim=3) # grid_x x grid_y x num_iter x 4
+
+ # (grid_x x grid_y x (height * width) x 3) x (grid_x x grid_y x 3 x num_iter) + (grid_x x grid_y x 1 x num_iter)
+ distances = torch.abs(array.matmul(coeffs[:, :, :, :3].permute(0, 1, 3, 2)) + \
+ coeffs[:, :, :, 3:4].permute(0, 1, 3, 2)) / \
+ torch.linalg.norm(coeffs[:, :, :, :3], dim = 3, keepdim=True).permute(0, 1, 3, 2) #grid_x x grid_y x (height * width) x num_iter
+
+ # Count the number of inliers
+ inliers = torch.sum(distances < threshold_distance, dim=2) # grid_x x grid_y x num_iter
+
+ valid_planes = torch.zeros((inliers.shape[0], inliers.shape[1]), dtype=bool) # grid_x x grid_y
+
+ maxed_inliers = torch.max(inliers, dim = 2) # grid_x x grid_y
+ valid_planes = maxed_inliers.values > num_inliers # grid_x x grid_y
+
+ # choose the best plane
+ best_plane_ids = maxed_inliers.indices # grid_x x grid_y
+
+ best_planes = torch.zeros((array.shape[0], array.shape[1], 4)).to(device) # grid_x x grid_y x 4
+
+ for i in range(array.shape[0]):
+ for j in range(array.shape[1]):
+ best_planes[i, j] = coeffs[i, j, best_plane_ids[i, j], :]
+
+ # could I do this without the for loops ?
+
+ return best_planes, valid_planes
+
+def getPC(depth_array, gridshape = (8, 8), \
+ max_depth = 3000, min_depth = 100, num_iter = 20, thresh_dist = 20, num_inliers = 200, \
+ how_to_replace = 'random', filter = True, device = 'cpu', ground_vector = torch.tensor([0, np.cos(16/180*torch.pi), np.sin(16/180*torch.pi)])):
+
+
+ # print(depth_array.shape)
+
+ if depth_array.shape[0] == 720: # 720p
+ f_x = 798.31
+ # f_y = f_x
+
+ c_x = 655.73
+ c_y = 313.12
+
+ elif depth_array.shape[0] == 400: # 400p
+ f_x = 399.15
+ # f_y = f_x
+
+ c_x = 327.86
+ c_y = 176.55
+
+ # before 1280 720
+ # after 256 144
+ skip_points = 5
+ depth_array = depth_array[::skip_points, ::skip_points]
+
+ depth_array = torch.from_numpy(depth_array.astype(np.float32))
+
+ f_x = f_x / skip_points
+ c_x = c_x / skip_points
+ c_y = c_y / skip_points
+
+ x, y, z = convertToPointCloud(depth_array, f_x, c_x, c_y)
+ pc = torch.stack((x, y, z), axis=0) # 3 x height x width
+
+ # reshape the pointcloud image into a grid of subimages
+ grid_height = (depth_array.shape[0] // gridshape[0])
+ grid_width = (depth_array.shape[1] // gridshape[1])
+
+ subgrid_size = (grid_height, grid_width)
+
+ subimages = torch.split(pc, subgrid_size[0], dim=1)
+ subimages = [torch.split(subimage, subgrid_size[1], dim=2) for subimage in subimages]
+
+ array = torch.stack([torch.stack(subimage_row, dim=1) for subimage_row in subimages], dim=1)
+ array = array.reshape(3, gridshape[0], gridshape[1], grid_height * grid_width)
+
+ # filter 0 : don't do the filtering
+ if filter == False:
+ array = array.reshape(3, -1)
+ array = array.transpose(dim0=0, dim1=1)
+ return array
+
+ # filter 1 : points too far or too close
+ array = array * (array[2, :, :, :] < max_depth) * (array[2, :, :, :] > min_depth)
+
+ # filter 2 : compute ratio of valid points to all points,
+ # if ratio is too low, then discard the subgrid
+ valid_subgrids = torch.zeros((gridshape[0], gridshape[1]), dtype=bool) # grid_x x grid_y
+
+ num_pts_per_subgrid = grid_height * grid_width
+ sums = torch.zeros((gridshape[0], gridshape[1]))
+ sums = torch.sum(array[2, :, :, :] > 0, dim=2)
+
+ print(sums)
+
+ ratio = 0.4
+ for i in range(gridshape[0]):
+ for j in range(gridshape[1]):
+ valid_pts_mask = (array[2, i, j, :] > 0) # grid_x x grid_y x nb_pts
+ # sum_ = torch.sum(valid_pts_mask)
+ # sums[i, j] = sum_
+
+ # if number of valid points is greater than ratio
+ if sums[i,j] > num_pts_per_subgrid * ratio:
+ # set subgrid to be valid
+ valid_subgrids[i, j] = 1
+
+ # get the valid points
+ valid_pts = array[:, i, j, valid_pts_mask] # 3 x num_valid_pts
+
+ if how_to_replace == 'gaussian': # not working due to invalid values in the covariance matrix
+ # set the invalid points to be distributed according to normal dist defined by the valid points
+ # compute mean and cov of valid points
+ mean = torch.mean(valid_pts, axis=1) # 3
+ cov = torch.cov(valid_pts) # 3 x 3
+ distr = torch.distributions.multivariate_normal.MultivariateNormal(mean, cov)
+ array[:, i, j, ~valid_pts_mask] = distr.sample((torch.sum(~valid_pts_mask),)).T
+ elif how_to_replace == 'median':
+ median, indices = torch.median(valid_pts, dim=1, keepdim=True) #
+ array[:, i, j, ~valid_pts_mask] = median
+ elif how_to_replace == 'mean':
+ # set invalid points to the mean of the valid points
+ mean = torch.mean(valid_pts, axis=1, keepdim=True)
+ array[:, i, j, ~valid_pts_mask] = mean
+ elif how_to_replace == 'random':
+ # set the invalid points to be randomly one of the valid points
+ random_valid_pts = torch.randint(0, valid_pts.shape[1], (torch.sum(~valid_pts_mask),))
+ array[:, i, j, ~valid_pts_mask] = valid_pts[:, random_valid_pts]
+ else:
+ raise ValueError('how_to_replace must be one of gaussian, median, mean, random')
+ pass
+ else:
+ # ratio is not satisfied so set all points to zero
+ array[:, i, j, :] = 0
+
+ # compute the planes for each subgrid
+ best_planes, valid_planes = ransac(array, num_iter, thresh_dist, num_inliers, num_random_pts=3, device=device) # grid_x x grid_y x 4
+
+
+ plane_vectors = torch.stack([best_planes[:, :, 0], best_planes[:, :, 1], -1*torch.ones_like(best_planes[:, :, 1])], dim=2)
+
+ plane_vectors = plane_vectors / torch.linalg.norm(plane_vectors, dim=2, keepdim=True) # grid_x x grid_y x 3
+
+ # compute scalar product between plane and gravity vector
+ scalar_products = torch.sum(plane_vectors * ground_vector, dim=2, keepdim=False) # grid_x x grid_y
+
+ # compute angle between plane and gravity vector
+ angles = torch.arccos(torch.abs(scalar_products)) * 180 / torch.pi # grid_x x grid_y
+ angles = angles * valid_subgrids #* valid_planes
+ print('Angles: ', angles)
+
+ valid_subgrids = (angles > 45) # grid_x x grid_y
+
+ array = array[:, valid_subgrids, :]
+
+ #z_new = array[ 0, :, :, :] * coeff[:, :, 0:1] + array[1, :, :, :] * coeff[:, :, 1:2] + coeff[:, :, 2:3]
+
+ array = array.reshape(3, -1)
+ array = array.transpose(dim0=0, dim1=1)
+
+ return array
+
+ # # plot the best plane found by ransac
+
+ # z_new = array[0, :, :, :] * best_planes[:, :, 0:1] + array[1, :, :, :] * best_planes[:, :, 1:2] + best_planes[:, :, 3:4]
+ # array[2, :, :, :] = z_new
+
+
+ # array = array.reshape(3, -1)
+ # array = array.transpose(dim0=0, dim1=1)
+
+ # return array
\ No newline at end of file
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter_old.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter_old.py
new file mode 100644
index 0000000..810c64d
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter_old.py
@@ -0,0 +1,199 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from sklearn.neighbors import BallTree
+import numpy as np
+import torch
+
+def filter_cuda(points):
+ #fct = lambda x, z, coeff: coeff[0] + coeff[1] * x + coeff[2] * z
+
+ ### parameters
+
+ skip_points = 50
+
+ # ransac parameters
+
+ num_iterations = 10
+ threshold_distance = 25
+ num_inliers = 500
+ num_random_pts = 12
+
+ # distance filter threshold
+ threshold = 100
+
+ # outlier removal parameters
+ numberpts = 10
+ radius = 200
+ leaf_size = 10
+
+
+ points = points[::skip_points]
+
+ best_plane = ransac_3d_cuda(points, num_iterations, threshold_distance, num_inliers, num_random_pts, device='cuda')
+
+ x = points[:, 0]
+ y = points[:, 1]
+ z = points[:, 2]
+
+ y_pred = -(best_plane[0] * x + best_plane[2] * z + best_plane[3]) / best_plane[1]
+
+ # keep points that are farther than threshold from the fitted ground plane (i.e. drop the ground itself)
+
+ points = points[np.abs(y_pred - y) > threshold]
+
+ # remove outliers
+ tree = BallTree(points, leaf_size=leaf_size, metric='euclidean')
+
+ dist, ind = tree.query(points, k=numberpts) # query the tree for the numberpts nearest neighbours of each point
+
+ # if any of the numberpts nearest neighbours is farther than radius, mark the point as an outlier
+ delidx = np.ones(points.shape[0], dtype=bool)
+ for i in range(points.shape[0]):
+ if np.any(dist[i] > radius):
+ delidx[i] = False
+
+ points = points[delidx]
+ return points
+
+
+def ransac_3d_cuda(points, num_iterations, threshold_distance, num_inliers, device, num_random_pts=12):
+ ### gpu code
+ # load the points to the gpu
+ points = torch.from_numpy(points).to(device)
+
+ # create a random index array
+ random_indices = torch.randint(0, points.shape[0], (num_iterations, num_random_pts) ).to(device)
+ # rand_ind_1 = torch.randint(0, num_iterations)
+ # print(points.shape)
+ # points = points.repeat(num_iterations, 1, 1)
+ print(points.shape)
+ print(random_indices.shape)
+
+ random_points = points[random_indices, :] # num_iter x random_pts x 3
+ print(points.shape)
+
+ # fit plane to random points
+
+ # Construct the matrix A
+
+ A = torch.cat([random_points[:, :, 0:1]*0 + 1, random_points[:, :, 0:1], random_points[:, :, 2:3]], dim=2)#.permute(0, 2, 1) # y = c + ax + bz
+ b = random_points[:, :, 1]
+
+ print(A.shape) # 10 x 12 x 3, num_iter x num_random_pts x 3
+ print(b.shape) # 10 x 12
+
+ coeff, _, _, _ = torch.linalg.lstsq(A, b)
+ print(coeff.shape) # 10 x 3
+
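+ # rearrange (c, a, b) from y = c + a*x + b*z into plane form a*x - y + b*z + c = 0, i.e. (a, -1, b, c)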
+ coeffs = torch.stack([coeff[:, 1], -1*torch.ones_like(coeff[:, 1]), coeff[:, 2], coeff[:, 0]], dim=1)
+ print(coeffs.shape) # 10 x 4
+
+ # Compute the distance between each point and the plane
+ distances = torch.abs(points.matmul(coeffs[:, :3].permute(1, 0)) + coeffs[:, 3]) / torch.linalg.norm(coeffs[:, :3], dim=1)
+
+ print(distances.shape) # pc_size x 10
+ # Count the number of inliers
+ inliers = torch.sum(distances < threshold_distance, dim=0)
+ # print(inliers.shape)
+
+ # choose the best plane
+ best_plane_id = torch.argmax(inliers, dim=0)
+ # print(coeffs.shape)
+
+ best_plane = coeffs[best_plane_id]
+
+ return best_plane
+
+
+def filter(points):
+ fct = lambda x, z, coeff: coeff[0] + coeff[1] * x + coeff[2] * z
+
+ skip_points = 50
+ points = points[::skip_points]
+
+ num_iterations = 10
+ threshold_distance = 25
+ num_inliers = 500
+
+ threshold = 100
+
+ best_plane = ransac_3d(points, num_iterations, threshold_distance, num_inliers)
+
+ x = points[:, 0]
+ y = points[:, 1]
+ z = points[:, 2]
+
+ y_pred = -(best_plane[0] * x + best_plane[2] * z + best_plane[3]) / best_plane[1]
+
+ # keep points that are farther than threshold from the fitted ground plane (i.e. drop the ground itself)
+
+ points = points[np.abs(y_pred - y) > threshold]
+
+ # remove outliers
+ numberpts = 10
+ radius = 200
+ tree = BallTree(points, leaf_size=10, metric='euclidean')
+
+ dist, ind = tree.query(points, k=numberpts) # query the tree for the numberpts nearest neighbours of each point
+
+ # if any of the numberpts nearest neighbours is farther than radius, mark the point as an outlier
+ delidx = np.ones(points.shape[0], dtype=bool)
+ for i in range(points.shape[0]):
+ if np.any(dist[i] > radius):
+ delidx[i] = False
+
+ points = points[delidx]
+ return points
+
+def ransac_3d(points, num_iterations, threshold_distance, num_inliers):
+ """
+ RANSAC algorithm for fitting a plane to 3D point cloud data.
+
+ :param points: 3D point cloud data (array-like object of shape (N, 3))
+ :param num_iterations: number of iterations for RANSAC algorithm (int)
+ :param threshold_distance: threshold distance for inliers (float)
+ :param num_inliers: number of inliers required to fit a plane (int)
+ :return: plane parameters (array-like object of shape (4,))
+ """
+ max_inliers = 0
+ best_plane = None
+
+ for i in range(num_iterations):
+ # Randomly select a small subset of points (10 here)
+ sample_indices = np.random.choice(len(points), size=10, replace=False)
+ sample_points = points[sample_indices]
+
+ # Fit a plane to the sampled points
+ coeffs = fit_plane(sample_points)
+
+ # Compute the distance between each point and the plane
+ distances = np.abs(points.dot(coeffs[:3]) + coeffs[3]) / np.linalg.norm(coeffs[:3])
+
+ # Count the number of inliers
+ inliers = np.sum(distances < threshold_distance)
+
+ # Update the best plane if this iteration has more inliers
+ if inliers > max_inliers and inliers >= num_inliers:
+ max_inliers = inliers
+ best_plane = coeffs
+
+ if best_plane is None:
+ raise ValueError("Could not fit a plane to the point cloud data")
+
+ return best_plane
+
+def fit_plane(points):
+ """
+ Fit a plane to 3D points using a least-squares regression.
+
+ :param points: 3D points (array-like object of shape (N, 3))
+ :return: plane parameters (array-like object of shape (4,))
+ """
+ # Construct the matrix A
+ A = np.array([points[:, 0]*0 + 1, points[:, 0], points[:, 2]]).T # y = c + ax + bz
+ b = points[:, 1]
+ coeff, r, rank, s = np.linalg.lstsq(A, b, rcond=None)
+
+ coeffs = np.array([coeff[1], -1, coeff[2], coeff[0]])
+
+ return coeffs
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter_old_v2.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter_old_v2.py
new file mode 100644
index 0000000..56ca8be
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/ransac_filter_old_v2.py
@@ -0,0 +1,208 @@
+import torch
+import numpy as np
+
+
+def convertToPointCloud(array, f_x, c_x, c_y):
+ height, width = array.shape
+ x = torch.arange(0, width)
+ y = torch.arange(0, height)
+
+ #print(height, width)
+
+ x, y = torch.meshgrid(x, y, indexing='xy')
+ #print(x, y)
+ x = x - c_x
+ y = y - c_y
+
+ x = x * array / f_x
+ y = y * array / f_x
+
+ return x, y, array
+
+def ransac(array, num_iterations, threshold_distance, num_inliers, num_random_pts=3, device='cpu'):
+ ### gpu code
+ # load the points to the gpu
+ # size grid_x x grid_y x (height * width) x 3
+ # array = torch.from_numpy(array).to('cpu')
+
+ array = array.to(device) # 3 x grid_x x grid_y x (height * width)
+ array = array.permute(1, 2, 3, 0) # grid_x x grid_y x (height * width) x 3
+
+ # for each subgrid element we want to select num_random_pts random
+ num_pts = array.shape[-2]
+
+ random_indices = torch.randint(0, num_pts, \
+ (num_iterations, num_random_pts)) # shape num_iterations, num_random_pts
+
+ random_points = array[:, :, random_indices, :] # shape grid_x x grid_y x num_iter x num_random_pts x 3
+
+ A = torch.cat([random_points[:, :, :, :, 0:1] * 0 + 1, random_points[:, :, :, :, 0:1], \
+ random_points[:, :, :, :, 2:3]], dim = 4) # grid_x x grid_y x num_iter x num_random_pts x 3
+
+ b = random_points[:, :, :, :, 1] # grid_x x grid_y x num_iter x num_random_pts
+ print(A.shape)
+ print(b.shape)
+
+ coeff, _, _, _ = torch.linalg.lstsq(A, b) # grid_x x grid_y x num_iter x 3
+ # print(coeff.shape)
+
+ coeffs = torch.stack([coeff[:, :, :, 1], -1*torch.ones_like(coeff[:, :, :, 1]), \
+ coeff[:, :, :, 2], coeff[:, :, :, 0]], dim=3) # grid_x x grid_y x num_iter x 4
+
+ # Compute the distance between each point and the plane
+
+ # (grid_x x grid_y x (height * width) x 3) x (grid_x x grid_y x 3 x num_iter) + (grid_x x grid_y x 1 x num_iter)
+ distances = torch.abs(array.matmul(coeffs[:, :, :, :3].permute(0, 1, 3, 2)) + \
+ coeffs[:, :, :, 3:4].permute(0, 1, 3, 2)) / \
+ torch.linalg.norm(coeffs[:, :, :, :3], dim = 3, keepdim=True).permute(0, 1, 3, 2) #grid_x x grid_y x (height * width) x num_iter
+
+
+ # Count the number of inliers
+ inliers = torch.sum(distances < threshold_distance, dim=2) # grid_x x grid_y x num_iter
+
+ valid_planes = torch.zeros((inliers.shape[0], inliers.shape[1]), dtype=bool) # grid_x x grid_y
+
+ maxed_inliers = torch.max(inliers, dim = 2) # grid_x x grid_y
+ valid_planes = maxed_inliers.values > num_inliers # grid_x x grid_y
+
+ # choose the best plane
+ best_plane_ids = maxed_inliers.indices # grid_x x grid_y
+
+ best_planes = torch.zeros((array.shape[0], array.shape[1], 4)).to(device) # grid_x x grid_y x 4
+
+ for i in range(array.shape[0]):
+ for j in range(array.shape[1]):
+ best_planes[i, j] = coeffs[i, j, best_plane_ids[i, j], :]
+
+ # could I do this without the for loops ?
+
+ return best_planes, valid_planes
+
+def getPC(depth_array, gridshape = (8, 8), \
+ max_depth = 3000, min_depth = 100, num_iter = 20, thresh_dist = 20, num_inliers = 200, \
+ how_to_replace = 'random', filter = True, device = 'cpu', ground_vector = torch.tensor([0, np.cos(0.35), -np.sin(0.35)])):
+ # before 1280 720
+ # after 256 144
+ skip_points = 5
+ depth_array = depth_array[::skip_points, ::skip_points]
+ # conf_array = conf_array[::skip_points, ::skip_points]
+
+ depth_array = torch.from_numpy(depth_array.astype(np.float32))
+ # conf_array = torch.from_numpy(conf_array.astype(np.uint8))
+
+ f_x = 798.31
+ # f_y = f_x
+
+ c_x = 655.73
+ c_y = 313.12
+
+ f_x = f_x / skip_points
+ c_x = c_x / skip_points
+ c_y = c_y / skip_points
+
+ x, y, z = convertToPointCloud(depth_array, f_x, c_x, c_y)
+ pc = torch.stack((x, y, z), axis=0) # 3 x height x width
+
+ # reshape the pointcloud image into a grid of subimages
+ grid_height = (depth_array.shape[0] // gridshape[0])
+ grid_width = (depth_array.shape[1] // gridshape[1])
+
+ subgrid_size = (grid_height, grid_width)
+
+ subimages = torch.split(pc, subgrid_size[0], dim=1)
+ subimages = [torch.split(subimage, subgrid_size[1], dim=2) for subimage in subimages]
+
+ array = torch.stack([torch.stack(subimage_row, dim=1) for subimage_row in subimages], dim=1)
+ array = array.reshape(3, gridshape[0], gridshape[1], grid_height * grid_width)
+
+ # # reshape the confidence array into a grid of subimages
+ # subconf = torch.split(conf_array, subgrid_size[0], dim=0)
+ # subconf = [torch.split(subconf_row, subgrid_size[1], dim=1) for subconf_row in subconf]
+
+ # conf_array = torch.stack([torch.stack(subconf_row, dim=1) for subconf_row in subconf], dim=1)
+ # conf_array = conf_array.reshape(gridshape[0], gridshape[1], grid_height * grid_width)
+
+ # filter 1 : points too far or too close
+ array = array * (array[2, :, :, :] < max_depth) * (array[2, :, :, :] > min_depth)
+
+ # # filter 2 : points we are not confident in
+ # array = array * (conf_array[:, :, :] < threshold)
+
+ if filter == False:
+ return array
+
+ # filter 3 : compute ratio of valid points to all points,
+ # if ratio is too low, then discard the subgrid
+ valid_subgrids = torch.zeros((gridshape[0], gridshape[1]), dtype=bool) # grid_x x grid_y
+
+ num_pts_per_subgrid = grid_height * grid_width
+ sums = torch.zeros((gridshape[0], gridshape[1]))
+ sums = torch.sum(array[2, :, :, :] > 0, dim=2)
+
+ ratio = 0.4
+ for i in range(gridshape[0]):
+ for j in range(gridshape[1]):
+ valid_pts_mask = (array[2, i, j, :] > 0)
+ # sum_ = torch.sum(valid_pts_mask)
+ # sums[i, j] = sum_
+
+ # if number of valid points is greater than ratio
+ if sums[i,j] > num_pts_per_subgrid * ratio:
+ # set subgrid to be valid
+ valid_subgrids[i, j] = 1
+
+ # get the valid points
+ valid_pts = array[:, i, j, valid_pts_mask] # 3 x num_valid_pts
+
+ if how_to_replace == 'gaussian':
+ # set the invalid points to be distributed according to normal dist defined by the valid points
+ # compute mean and cov of valid points
+ mean = torch.mean(valid_pts, axis=1)
+ cov = torch.cov(valid_pts)
+ distr = torch.distributions.multivariate_normal.MultivariateNormal(mean, cov)
+ array[:, i, j, ~valid_pts_mask] = distr.sample((torch.sum(~valid_pts_mask),)).T
+ elif how_to_replace == 'mean':
+ # set invalid points to the mean of the valid points
+ mean = torch.mean(valid_pts, axis=1, keepdim=True)
+ array[:, i, j, ~valid_pts_mask] = mean
+ elif how_to_replace == 'random':
+ # set the invalid points to be randomly one of the valid points
+ random_valid_pts = torch.randint(0, valid_pts.shape[1], (torch.sum(~valid_pts_mask),))
+ array[:, i, j, ~valid_pts_mask] = valid_pts[:, random_valid_pts]
+ else:
+ raise ValueError('how_to_replace must be one of gaussian, mean, random')
+ pass
+
+ # filter 4 : if the plane is less than 45 degrees from horizontal, then discard the subgrid
+
+ # compute the planes for each subgrid
+ best_planes, valid_planes = ransac(array, num_iter, thresh_dist, num_inliers, num_random_pts=3, device=device) # grid_x x grid_y x 4
+
+ print(best_planes)
+ plane_vectors = best_planes[:, :, :3]
+ plane_vectors = plane_vectors / torch.linalg.norm(plane_vectors, dim=2, keepdim=True) # grid_x x grid_y x 3
+
+ # compute scalar product between plane and gravity vector
+ scalar_products = torch.sum(plane_vectors * ground_vector, dim=2, keepdim=False) # grid_x x grid_y
+
+ # compute angle between plane and gravity vector
+ angles = torch.arccos(torch.abs(scalar_products)) * 180 / torch.pi # grid_x x grid_y
+ print(ground_vector)
+ print(angles)
+ print(plane_vectors)
+
+ # print(valid_subgrids * angles)
+
+ valid_subgrids = valid_subgrids * (angles > 45) * valid_planes # grid_x x grid_y
+
+ print(valid_subgrids)
+
+ # filtered_points = array
+
+ filtered_points = array[:, valid_subgrids, :] # 3 x num_valid_subgrids x num_pts_per_subgrid
+
+ filtered_points = filtered_points.reshape(3, -1) # 3 x (num_valid_subgrids * num_pts_per_subgrid)
+ filtered_points = filtered_points.T # (num_valid_subgrids * num_pts_per_subgrid) x 3
+
+ # filtered_points = filtered_points[filtered_points[:, 2] > 0, :] # remove points with z = 0
+ return filtered_points
\ No newline at end of file
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/slope_filter.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/slope_filter.py
new file mode 100644
index 0000000..0527447
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/obstacle_filter/filter/slope_filter.py
@@ -0,0 +1,112 @@
+import torch
+import numpy as np
+
+from .filter import ransac, lstsq
+
+
+def getPC(pc, gridshape = (8, 8), \
+ max_height = 2000.0, max_dist = 6000.0, num_iter = 20, thresh_dist = 20, num_inliers = 200, \
+ how_to_replace = 'random', filter = 'ransac', device = 'cpu', \
+ obstacle_angle = 45):
+
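+ # the lidar cloud is assumed to be roughly gravity-aligned, so the ground normal is taken as +z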
+ ground_vector = torch.tensor([0, 0, 1], device=device)
+
+ pc = torch.from_numpy(pc.astype(np.float32))
+
+ # reshape the pointcloud image into a grid of subimages
+ grid_height = (pc.shape[1] // gridshape[0])
+ grid_width = (pc.shape[2] // gridshape[1])
+
+ subgrid_size = (grid_height, grid_width)
+
+ subimages = torch.split(pc, subgrid_size[0], dim=1)
+ subimages = [torch.split(subimage, subgrid_size[1], dim=2) for subimage in subimages]
+
+ array = torch.stack([torch.stack(subimage_row, dim=1) for subimage_row in subimages], dim=1)
+ array = array.reshape(3, gridshape[0], gridshape[1], grid_height * grid_width)
+
+ # filter 0 : don't do the filtering
+ if filter == 'no_filter':
+ array = array.reshape(3, -1)
+ array = array.transpose(dim0=0, dim1=1)
+ return array
+ elif filter == 'off':
+ return
+
+ orig_array = array
+
+ dist = torch.linalg.norm(array[:2], dim = 0).clone()
+
+ # filter 1 : points too high, too far, or closer than the hard-coded 2000 threshold
+ array = array * (array[2, :, :, :] < (max_height - 1.0)) * (dist < max_dist) * (dist > 2000)
+
+ # filter 2 : compute ratio of valid points to all points,
+ # if ratio is too low, then discard the subgrid
+ valid_subgrids = torch.zeros((gridshape[0], gridshape[1]), dtype=bool) # grid_x x grid_y
+
+ num_pts_per_subgrid = grid_height * grid_width
+
+ sums = torch.sum(dist > 10, dim=2) # count points per subgrid whose horizontal (x, y) distance is greater than 10 (points at the sensor origin are invalid)
+
+ ratio = 0.4
+ for i in range(gridshape[0]):
+ for j in range(gridshape[1]):
+ valid_pts_mask = dist[i, j, :] > 0
+
+ # if number of valid points is greater than ratio
+ if sums[i,j] > num_pts_per_subgrid * ratio:
+ # set subgrid to be valid
+ valid_subgrids[i, j] = 1
+
+ # get the valid points
+ valid_pts = array[:, i, j, valid_pts_mask] # 3 x num_valid_pts
+
+ if how_to_replace == 'median':
+ median, indices = torch.median(valid_pts, dim=1, keepdim=True) #
+ array[:, i, j, ~valid_pts_mask] = median
+ elif how_to_replace == 'mean':
+ # set invalid points to the mean of the valid points
+ mean = torch.mean(valid_pts, axis=1, keepdim=True)
+ array[:, i, j, ~valid_pts_mask] = mean
+ elif how_to_replace == 'random':
+ # set the invalid points to be randomly one of the valid points
+ random_valid_pts = torch.randint(0, valid_pts.shape[1], (torch.sum(~valid_pts_mask),))
+ array[:, i, j, ~valid_pts_mask] = valid_pts[:, random_valid_pts]
+ else:
+ raise ValueError('how_to_replace must be one of median, mean, random')
+ else:
+ # ratio is not satisfied, so set all points to be random. This is so the lstsq function works on CUDA
+ random_values = torch.normal(mean = 1000, std = 100, size=(array.shape[0], array.shape[-1]))
+ array[:, i, j, :] = random_values
+
+ if filter == 'ransac':
+ coeff = ransac(array, num_iter, thresh_dist, num_inliers, num_random_pts=3, device=device) # grid_x x grid_y x 4
+ elif filter == 'lstsq':
+ coeff = lstsq(array, device=device) # grid_x x grid_y x 3
+
+ plane_vectors = torch.stack([coeff[:, :, 0], coeff[:, :, 1], -1*torch.ones_like(coeff[:, :, 1])], dim=2)
+
+ plane_vectors = plane_vectors / torch.linalg.norm(plane_vectors, dim=2, keepdim=True) # grid_x x grid_y x 3
+
+ # compute scalar product between plane and gravity vector
+ scalar_products = torch.sum(plane_vectors * ground_vector, dim=2, keepdim=False) # grid_x x grid_y
+
+ # compute angle between plane and gravity vector
+ angles = torch.arccos(torch.abs(scalar_products)) * 180 / torch.pi # grid_x x grid_y
+
+ # only consider angles which are valid
+ angles = angles * valid_subgrids
+ print('Angles: ', angles)
+
+ valid_subgrids = (angles > obstacle_angle) # grid_x x grid_y
+
+ array = array[:, valid_subgrids, :]
+ not_array = orig_array[:, ~valid_subgrids, :]
+
+ array = array.reshape(3, -1)
+ array = array.transpose(dim0=0, dim1=1)
+
+ not_array = not_array.reshape(3, -1)
+ not_array = not_array.transpose(dim0=0, dim1=1)
+
+ return array, not_array
\ No newline at end of file
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/obstacle_filter_node.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/obstacle_filter_node.py
new file mode 100644
index 0000000..7e64aad
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/obstacle_filter/obstacle_filter_node.py
@@ -0,0 +1,183 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image, PointCloud2, PointField
+from std_msgs.msg import Header, String
+from rclpy.qos import QoSProfile, qos_profile_sensor_data
+
+import numpy as np
+import cv2
+from cv_bridge import CvBridge # may need: sudo apt install ros-<distro>-vision-opencv (e.g. ros-humble-vision-opencv)
+import time
+
+# Important
+# from .filter.ransac_filter import getPC
+# from .filter.lstsq_filter import getPC_lstsq
+from .filter.filter import getPC
+
+def point_cloud(points, parent_frame): #, stamp):
+ """ Creates a point cloud message.
+ Args:
+ points: Nx3 array of xyz positions.
+ parent_frame: frame in which the point cloud is defined
+ Returns:
+ sensor_msgs/PointCloud2 message
+ Code source:
+ https://gist.github.com/pgorczak/5c717baa44479fa064eb8d33ea4587e0
+ References:
+ http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointCloud2.html
+ http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointField.html
+ http://docs.ros.org/melodic/api/std_msgs/html/msg/Header.html
+ """
+ # In a PointCloud2 message, the point cloud is stored as a byte
+ # array. In order to unpack it, we also include some parameters
+ # which describe the size of each individual point.
+ ros_dtype = PointField.FLOAT32
+ dtype = np.float32
+ itemsize = np.dtype(dtype).itemsize # A 32-bit float takes 4 bytes.
+
+ data = points.astype(dtype).tobytes()
+
+ # The fields specify what the bytes represent. The first 4 bytes
+ # represent the x-coordinate, the next 4 the y-coordinate, etc.
+ fields = [PointField(
+ name=n, offset=i*itemsize, datatype=ros_dtype, count=1)
+ for i, n in enumerate('xyz')]
+
+ # The PointCloud2 message also has a header which specifies which
+ # coordinate frame it is represented in.
+ header = Header(frame_id=parent_frame) #, stamp=stamp)
+
+ return PointCloud2(
+ header=header,
+ height=1,
+ width=points.shape[0],
+ is_dense=False,
+ is_bigendian=False,
+ fields=fields,
+ point_step=(itemsize * 3), # Every point consists of three float32s.
+ row_step=(itemsize * 3 * points.shape[0]),
+ data=data
+ )
+
+class ObstacleFilterNode(Node):
+ def __init__(self):
+ super().__init__('NAV_obstacle_filter_node')
+
+ # declare subscriber to topic /stereo/depth of type Image
+ self.subscription_ = self.create_subscription(
+ Image,
+ '/stereo/depth',
+ self.callback,
+ qos_profile_sensor_data)
+
+ self.destroy_sub = self.create_subscription(
+ String,
+ "/ROVER/NAV_status",
+ self.callback_destroy,
+ qos_profile_sensor_data)
+
+ # declare publisher to topic /obstacles_points of type PointCloud2
+ self.publisher_ = self.create_publisher(
+ PointCloud2,
+ '/obstacles_points',
+ qos_profile_sensor_data)
+
+ # declare publisher to topic /not_obstacles_points of type PointCloud2
+ self.publisher_2 = self.create_publisher(
+ PointCloud2,
+ '/not_obstacles_points',
+ qos_profile_sensor_data)
+
+ # declare parameters to be given as input to the node
+ self.declare_parameter('gridshape_y', 8) # 8x8 grid by default = 64 sub images
+ self.declare_parameter('gridshape_x', 8)
+ self.declare_parameter('max_depth', 3000) # 3 meters
+ self.declare_parameter('min_depth', 100) # 10 cm
+ self.declare_parameter('num_iter', 50) # 50 iterations for ransac
+ self.declare_parameter('thresh_dist', 20) # 20 mm distance threshold for ransac (inliers)
+ self.declare_parameter('num_inliers', 360) # 360 inliers for ransac for a valid plane
+ self.declare_parameter('camera_angle', 17) # 17 degrees camera angle (hardcoded for now)
+ self.declare_parameter('obstacle_angle', 45) # 45 degrees obstacle angle
+ self.declare_parameter('how_to_replace', 'random')
+
+ self.declare_parameter('filter', 'ransac')
+ self.declare_parameter('device', 'cpu')
+
+ # there may be a need for a ground vector parameter
+
+ # declare CvBridge object, which converts between ROS Image messages and OpenCV images
+ self.bridge = CvBridge()
+
+ def callback(self, msg):
+ # print('Received message')
+
+ # t1 = time.time()
+
+ # get parameters values
+ gridshape_y = self.get_parameter('gridshape_y').get_parameter_value().integer_value
+ gridshape_x = self.get_parameter('gridshape_x').get_parameter_value().integer_value
+ max_depth = self.get_parameter('max_depth').get_parameter_value().integer_value
+ min_depth = self.get_parameter('min_depth').get_parameter_value().integer_value
+ num_iter = self.get_parameter('num_iter').get_parameter_value().integer_value
+ thresh_dist = self.get_parameter('thresh_dist').get_parameter_value().integer_value
+ num_inliers = self.get_parameter('num_inliers').get_parameter_value().integer_value
+ camera_angle = self.get_parameter('camera_angle').get_parameter_value().integer_value
+ obstacle_angle = self.get_parameter('obstacle_angle').get_parameter_value().integer_value
+
+ how_to_replace = self.get_parameter('how_to_replace').get_parameter_value().string_value
+ filter_bool = self.get_parameter('filter').get_parameter_value().string_value
+ device = self.get_parameter('device').get_parameter_value().string_value
+
+ # convert ROS Image message to OpenCV image to numpy array
+ cv_img = self.bridge.imgmsg_to_cv2(msg)
+ depth_array = np.array(cv_img, dtype=np.float32)
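+ # depth values are in millimetres here; they are divided by 1000 before publishing in metres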
+
+ # Filter out all points that are not obstacles
+
+ # t2 = time.time()
+
+ # print("Time for message processing: ", t2-t1)
+
+ # points, not_points = getPC_lstsq(depth_array, \
+ points, not_points = getPC(depth_array, \
+ (gridshape_y, gridshape_x), max_depth, min_depth, \
+ num_iter, thresh_dist, num_inliers, how_to_replace, \
+ filter_bool, device, camera_angle, obstacle_angle)
+
+ # t3 = time.time()
+
+ # print("Time for filtering", t3-t2)
+
+ points = points.numpy() / 1000.0 # convert to meters
+ print(points.shape)
+
+ not_points = not_points.numpy() / 1000
+
+ # build PointCloud2 messages and publish the obstacle and non-obstacle clouds
+ pcd = point_cloud(points, msg.header.frame_id)
+ pcd2 = point_cloud(not_points, msg.header.frame_id)
+ self.publisher_.publish(pcd)
+ self.publisher_2.publish(pcd2)
+
+ # t4 = time.time()
+
+ # print("Time for publishing: ", t4-t3)
+ # print("Time for all: ", t4-t1)
+
+ def callback_destroy(self, msg):
+ if msg.data == "abort":
+ self.destroy_node()
+ rclpy.shutdown()
+
+
+
+def main(args=None):
+ print('Obstacle Filter Node started')
+ rclpy.init(args=args)
+
+ obstacle_filter_node = ObstacleFilterNode()
+
+ rclpy.spin(obstacle_filter_node)
+
+ obstacle_filter_node.destroy_node()
+ rclpy.shutdown()
\ No newline at end of file
diff --git a/obstacle_detection/camera/obstacle_filter/obstacle_filter/slope_filter_node.py b/obstacle_detection/camera/obstacle_filter/obstacle_filter/slope_filter_node.py
new file mode 100644
index 0000000..879796f
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/obstacle_filter/slope_filter_node.py
@@ -0,0 +1,253 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image, PointCloud2, PointField
+from std_msgs.msg import Header
+from rclpy.qos import QoSProfile, qos_profile_sensor_data
+
+import numpy as np
+import sys
+from collections import namedtuple
+import ctypes
+import math
+import struct
+
+import matplotlib.pyplot as plt
+from mpl_toolkits import mplot3d
+
+
+_DATATYPES = {}
+_DATATYPES[PointField.INT8] = ('b', 1)
+_DATATYPES[PointField.UINT8] = ('B', 1)
+_DATATYPES[PointField.INT16] = ('h', 2)
+_DATATYPES[PointField.UINT16] = ('H', 2)
+_DATATYPES[PointField.INT32] = ('i', 4)
+_DATATYPES[PointField.UINT32] = ('I', 4)
+_DATATYPES[PointField.FLOAT32] = ('f', 4)
+_DATATYPES[PointField.FLOAT64] = ('d', 8)
+
+import time
+
+from .filter.slope_filter import getPC
+
+
+def read_points(cloud, field_names=None, skip_nans=False, uvs=[]):
+ """
+ Read points from a L{sensor_msgs.PointCloud2} message.
+ @param cloud: The point cloud to read from.
+ @type cloud: L{sensor_msgs.PointCloud2}
+ @param field_names: The names of fields to read. If None, read all fields. [default: None]
+ @type field_names: iterable
+ @param skip_nans: If True, then don't return any point with a NaN value.
+ @type skip_nans: bool [default: False]
+ @param uvs: If specified, then only return the points at the given coordinates. [default: empty list]
+ @type uvs: iterable
+ @return: array of shape (3, height, width) containing the x, y, z values of each point.
+ @rtype: numpy.ndarray
+ """
+ assert isinstance(cloud, PointCloud2), 'cloud is not a sensor_msgs.msg.PointCloud2'
+ fmt = _get_struct_fmt(cloud.is_bigendian, cloud.fields, field_names)
+ width, height, point_step, row_step, data, isnan = cloud.width, cloud.height, cloud.point_step, cloud.row_step, cloud.data, math.isnan
+ unpack_from = struct.Struct(fmt).unpack_from
+
+ # if skip_nans:
+ # if uvs:
+ # for u, v in uvs:
+ # p = unpack_from(data, (row_step * v) + (point_step * u))
+ # has_nan = False
+ # for pv in p:
+ # if isnan(pv):
+ # has_nan = True
+ # break
+ # if not has_nan:
+ # yield p
+ # else:
+ # for v in range(height):
+ # offset = row_step * v
+ # for u in range(width):
+ # p = unpack_from(data, offset)
+ # has_nan = False
+ # for pv in p:
+ # if isnan(pv):
+ # has_nan = True
+ # break
+ # if not has_nan:
+ # yield p
+ # offset += point_step
+ # else:
+ # if uvs:
+ # for u, v in uvs:
+ # yield unpack_from(data, (row_step * v) + (point_step * u))
+ # else:
+ # for v in range(height):
+ # offset = row_step * v
+ # for u in range(width):
+ # # print(type(unpack_from(data, offset)), unpack_from(data, offset))
+ # yield unpack_from(data, offset)
+ # offset += point_step
+
+ pcd = np.zeros((3, height, width))
+
+ for v in range(height):
+ offset = row_step * v
+ for u in range(width):
+ data_tuple = unpack_from(data, offset)
+ pcd[:, v, u] = np.array([data_tuple[0], data_tuple[1], data_tuple[2]])
+ offset += point_step
+
+ return pcd
+
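+# Example usage (sketch): convert an incoming PointCloud2 into a 3 x H x W
+# array of xyz coordinates; `msg` stands for whatever cloud this node receives.
+#   pcd = read_points(msg)
+#   xs, ys, zs = pcd[0], pcd[1], pcd[2]
+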
+
+def _get_struct_fmt(is_bigendian, fields, field_names=None):
+ fmt = '>' if is_bigendian else '<'
+
+ offset = 0
+ for field in (f for f in sorted(fields, key=lambda f: f.offset) if field_names is None or f.name in field_names):
+ if offset < field.offset:
+ fmt += 'x' * (field.offset - offset)
+ offset = field.offset
+ if field.datatype not in _DATATYPES:
+ print('Skipping unknown PointField datatype [%d]' % field.datatype, file=sys.stderr)
+ else:
+ datatype_fmt, datatype_length = _DATATYPES[field.datatype]
+ fmt += field.count * datatype_fmt
+ offset += field.count * datatype_length
+
+ return fmt
+
+
+def point_cloud(points, parent_frame):
+ """ Creates a point cloud message.
+ Args:
+ points: Nx3 array of xyz positions.
+ parent_frame: frame in which the point cloud is defined
+ Returns:
+ sensor_msgs/PointCloud2 message
+ Code source:
+ https://gist.github.com/pgorczak/5c717baa44479fa064eb8d33ea4587e0
+ References:
+ http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointCloud2.html
+ http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointField.html
+ http://docs.ros.org/melodic/api/std_msgs/html/msg/Header.html
+ """
+    # In a PointCloud2 message, the point cloud is stored as a byte
+    # array. In order to unpack it, we also include some parameters
+    # which describe the size of each individual point.
+ ros_dtype = PointField.FLOAT32
+ dtype = np.float32
+ itemsize = np.dtype(dtype).itemsize # A 32-bit float takes 4 bytes.
+
+ data = points.astype(dtype).tobytes()
+
+    # The fields specify what the bytes represent. The first 4 bytes
+    # represent the x-coordinate, the next 4 the y-coordinate, etc.
+ fields = [PointField(
+ name=n, offset=i*itemsize, datatype=ros_dtype, count=1)
+ for i, n in enumerate('xyz')]
+
+ # The PointCloud2 message also has a header which specifies which
+ # coordinate frame it is represented in.
+ header = Header(frame_id=parent_frame)
+
+ return PointCloud2(
+ header=header,
+ height=1,
+ width=points.shape[0],
+ is_dense=False,
+ is_bigendian=False,
+ fields=fields,
+ point_step=(itemsize * 3), # Every point consists of three float32s.
+ row_step=(itemsize * 3 * points.shape[0]),
+ data=data
+ )
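+
+# Example usage (sketch, assuming `pts` is an Nx3 float array of xyz in meters):
+#   msg = point_cloud(pts, 'oak-d_frame')
+#   some_publisher.publish(msg)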
+
+class SlopeFilterNode(Node):
+ def __init__(self):
+ super().__init__('slope_filter_node')
+ self.publisher_ = self.create_publisher(
+ PointCloud2,
+ '/slope_points',
+ qos_profile_sensor_data)
+ self.publisher_2 = self.create_publisher(
+ PointCloud2,
+ '/not_slope_points',
+ qos_profile_sensor_data)
+
+ self.subscription_ = self.create_subscription(
+ PointCloud2,
+ '/ouster/points',
+ self.callback,
+ qos_profile_sensor_data)
+
+        # declare parameters to be given as input to the slope filter
+ self.declare_parameter('gridshape_y', 16)
+ self.declare_parameter('gridshape_x', 32)
+ self.declare_parameter('max_height', 2000)
+ self.declare_parameter('max_dist', 6000)
+ self.declare_parameter('num_iter', 20)
+ self.declare_parameter('thresh_dist', 20)
+ self.declare_parameter('num_inliers', 200)
+ self.declare_parameter('obstacle_angle', 45)
+ self.declare_parameter('how_to_replace', 'random')
+
+ self.declare_parameter('filter', 'ransac')
+ self.declare_parameter('device', 'cpu')
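+
+        # Parameters can be tuned at runtime without rebuilding, e.g. (sketch):
+        #   ros2 param set /slope_filter_node thresh_dist 30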
+
+
+
+ def callback(self, msg):
+ print('Received message')
+ t1 = time.time()
+ # get parameters values
+ gridshape_y = self.get_parameter('gridshape_y').get_parameter_value().integer_value
+ gridshape_x = self.get_parameter('gridshape_x').get_parameter_value().integer_value
+ max_height = self.get_parameter('max_height').get_parameter_value().integer_value
+ max_dist = self.get_parameter('max_dist').get_parameter_value().integer_value
+ num_iter = self.get_parameter('num_iter').get_parameter_value().integer_value
+ thresh_dist = self.get_parameter('thresh_dist').get_parameter_value().integer_value
+ num_inliers = self.get_parameter('num_inliers').get_parameter_value().integer_value
+ obstacle_angle = self.get_parameter('obstacle_angle').get_parameter_value().integer_value
+
+ how_to_replace = self.get_parameter('how_to_replace').get_parameter_value().string_value
+ filter_ = self.get_parameter('filter').get_parameter_value().string_value
+ device = self.get_parameter('device').get_parameter_value().string_value
+
+        points = read_points(msg, skip_nans=False) * 1000.0  # convert meters to millimetres for the filter
+        points, not_points = getPC(points, gridshape=(gridshape_y, gridshape_x), max_height=max_height,
+                                   max_dist=max_dist, num_iter=num_iter, thresh_dist=thresh_dist,
+                                   num_inliers=num_inliers, how_to_replace=how_to_replace,
+                                   filter=filter_, device=device, obstacle_angle=obstacle_angle)
+
+ pcd = point_cloud(points.numpy() / 1000.0, msg.header.frame_id)
+ not_pcd = point_cloud(not_points.numpy() / 1000.0, msg.header.frame_id)
+
+ self.publisher_.publish(pcd)
+ self.publisher_2.publish(not_pcd)
+
+ t4 = time.time()
+ print("Time for all: ", t4-t1)
+
+    def draw(self, points):
+        # Debug helper: scatter-plot the first 512 points of an Nx3 array
+        fig = plt.figure()
+        ax = fig.add_subplot(111, projection='3d')
+
+        ax.scatter(points[:512, 0], points[:512, 1], points[:512, 2], s=5)
+
+        ax.set_xlabel('X')
+        ax.set_ylabel('Y')
+        ax.set_zlabel('Z')
+
+        plt.show(block=True)
+
+
+
+
+def main(args=None):
+ print('Slope Filter Node started')
+ rclpy.init(args=args)
+
+ slope_filter_node = SlopeFilterNode()
+
+ rclpy.spin(slope_filter_node)
+
+ slope_filter_node.destroy_node()
+ rclpy.shutdown()
\ No newline at end of file
diff --git a/obstacle_detection/camera/obstacle_filter/package.xml b/obstacle_detection/camera/obstacle_filter/package.xml
new file mode 100644
index 0000000..eea464d
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/package.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>obstacle_filter</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="xplore@todo.todo">xplore</maintainer>
+  <license>TODO: License declaration</license>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <depend>rclpy</depend>
+  <depend>sensor_msgs</depend>
+  <depend>std_msgs</depend>
+  <depend>cv_bridge</depend>
+  <depend>opencv2</depend>
+  <depend>ransac_filter</depend>
+  <depend>lstsq_filter</depend>
+  <depend>filter</depend>
+  <depend>slope_filter</depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/obstacle_detection/camera/obstacle_filter/resource/obstacle_filter b/obstacle_detection/camera/obstacle_filter/resource/obstacle_filter
new file mode 100644
index 0000000..e69de29
diff --git a/obstacle_detection/camera/obstacle_filter/setup.cfg b/obstacle_detection/camera/obstacle_filter/setup.cfg
new file mode 100644
index 0000000..d901ff0
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/obstacle_filter
+[install]
+install_scripts=$base/lib/obstacle_filter
diff --git a/obstacle_detection/camera/obstacle_filter/setup.py b/obstacle_detection/camera/obstacle_filter/setup.py
new file mode 100644
index 0000000..fe827f7
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/setup.py
@@ -0,0 +1,27 @@
+from setuptools import find_packages, setup
+
+package_name = 'obstacle_filter'
+
+setup(
+ name=package_name,
+ version='0.0.0',
+ packages=find_packages(exclude=['test']),
+ data_files=[
+ ('share/ament_index/resource_index/packages',
+ ['resource/' + package_name]),
+ ('share/' + package_name, ['package.xml']),
+ ],
+ install_requires=['setuptools'],
+ zip_safe=True,
+ maintainer='xplore',
+ maintainer_email='xplore@todo.todo',
+ description='TODO: Package description',
+ license='TODO: License declaration',
+ tests_require=['pytest'],
+ entry_points={
+ 'console_scripts': [
+ 'obstacle_filter_node = obstacle_filter.obstacle_filter_node:main',
+ 'slope_filter_node = obstacle_filter.slope_filter_node:main',
+ ],
+ },
+)
diff --git a/obstacle_detection/camera/obstacle_filter/test/test_copyright.py b/obstacle_detection/camera/obstacle_filter/test/test_copyright.py
new file mode 100644
index 0000000..97a3919
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found errors'
diff --git a/obstacle_detection/camera/obstacle_filter/test/test_flake8.py b/obstacle_detection/camera/obstacle_filter/test/test_flake8.py
new file mode 100644
index 0000000..27ee107
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+ rc, errors = main_with_errors(argv=[])
+ assert rc == 0, \
+ 'Found %d code style errors / warnings:\n' % len(errors) + \
+ '\n'.join(errors)
diff --git a/obstacle_detection/camera/obstacle_filter/test/test_pep257.py b/obstacle_detection/camera/obstacle_filter/test/test_pep257.py
new file mode 100644
index 0000000..b234a38
--- /dev/null
+++ b/obstacle_detection/camera/obstacle_filter/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found code style errors / warnings'
diff --git a/obstacle_detection/lidar/pointcloud_proc/package.xml b/obstacle_detection/lidar/pointcloud_proc/package.xml
new file mode 100644
index 0000000..9b1510c
--- /dev/null
+++ b/obstacle_detection/lidar/pointcloud_proc/package.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>pointcloud_proc</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="david.farah@epfl.ch">david</maintainer>
+  <license>TODO: License declaration</license>
+
+  <depend>rclpy</depend>
+  <depend>sensor_msgs</depend>
+  <depend>opencv4</depend>
+  <depend>pcl_conversions</depend>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/obstacle_detection/lidar/pointcloud_proc/pointcloud_proc/__init__.py b/obstacle_detection/lidar/pointcloud_proc/pointcloud_proc/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/obstacle_detection/lidar/pointcloud_proc/pointcloud_proc/tester.py b/obstacle_detection/lidar/pointcloud_proc/pointcloud_proc/tester.py
new file mode 100644
index 0000000..7306896
--- /dev/null
+++ b/obstacle_detection/lidar/pointcloud_proc/pointcloud_proc/tester.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import PointCloud2
+import numpy as np
+import cv2 as cv
+import matplotlib.pyplot as plt
+from cv_bridge import CvBridge
+import sensor_msgs_py.point_cloud2 as pc2
+
+class PointCloudProcessor(Node):
+ def __init__(self):
+ super().__init__('point_cloud_processor')
+ self.subscription = self.create_subscription(
+ PointCloud2,
+ '/ouster/points',
+ self.point_cloud_callback,
+ 10)
+ self.subscription # prevent unused variable warning
+ self.bridge = CvBridge()
+
+ def create_heightmap(self, points):
+ # Assume points is a Nx3 numpy array
+ # Convert points to a heightmap here (simplified example)
+ H, W = 100, 100 # Define dimensions of the heightmap
+ heightmap = np.zeros((H, W), dtype=np.float32)
+
+ # Fill heightmap based on x, y, z values
+ # This is highly simplified and might need actual transformation logic
+ for x, y, z in points:
+ ix, iy = int(x), int(y)
+ if 0 <= ix < W and 0 <= iy < H:
+ heightmap[iy, ix] = z
+
+ return heightmap
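+        # Note (sketch, not part of the original logic): a more realistic mapping
+        # would shift/scale world coordinates into grid indices, e.g.
+        # ix = int((x - x_min) / resolution), and keep the maximum z per cell
+        # instead of overwriting it.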
+
+ def point_cloud_callback(self, msg):
+ # Convert PointCloud2 to numpy array
+ points_array = np.array(list(pc2.read_points(msg, field_names=("x", "y", "z"), skip_nans=True)))
+ print(points_array.shape)
+
+ if points_array.size == 0:
+ return
+
+ # Example: Create a heightmap from the point cloud
+ # This is a placeholder for actual heightmap generation code
+ heightmap = self.create_heightmap(points_array)
+ print(heightmap.shape)
+ # Display or process the heightmap
+ self.display_heightmap(heightmap)
+
+    def display_heightmap(self, heightmap):
+        # Compute the X, Y grid coordinates from the heightmap shape.
+        # METERS_PER_PIXEL is an assumed grid resolution [m/pixel]; adjust to the real map scale.
+        METERS_PER_PIXEL = 0.1
+        H, W = heightmap.shape
+        X = np.arange(0, W, 1) * METERS_PER_PIXEL
+        Y = np.arange(0, H, 1) * METERS_PER_PIXEL
+        X, Y = np.meshgrid(X, Y)
+        Z = heightmap
+
+        # Plot the heightmap
+        fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
+        ax.plot_surface(X, Y, Z, cmap="hot")
+        ax.set_title("Heightmap")
+        ax.set_xlabel("X [m]")
+        ax.set_ylabel("Y [m]")
+        ax.set_zlabel("Height [m]")
+        plt.show(block=False)
+
+
+def main(args=None):
+ rclpy.init(args=args)
+    print('Point Cloud Processor Node started')
+ point_cloud_processor = PointCloudProcessor()
+ rclpy.spin(point_cloud_processor)
+
+ point_cloud_processor.destroy_node()
+ rclpy.shutdown()
+
+if __name__ == '__main__':
+ main()
diff --git a/obstacle_detection/lidar/pointcloud_proc/resource/pointcloud_proc b/obstacle_detection/lidar/pointcloud_proc/resource/pointcloud_proc
new file mode 100644
index 0000000..e69de29
diff --git a/obstacle_detection/lidar/pointcloud_proc/setup.cfg b/obstacle_detection/lidar/pointcloud_proc/setup.cfg
new file mode 100644
index 0000000..03451f5
--- /dev/null
+++ b/obstacle_detection/lidar/pointcloud_proc/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/pointcloud_proc
+[install]
+install_scripts=$base/lib/pointcloud_proc
diff --git a/obstacle_detection/lidar/pointcloud_proc/setup.py b/obstacle_detection/lidar/pointcloud_proc/setup.py
new file mode 100644
index 0000000..dd91083
--- /dev/null
+++ b/obstacle_detection/lidar/pointcloud_proc/setup.py
@@ -0,0 +1,26 @@
+from setuptools import find_packages, setup
+
+package_name = 'pointcloud_proc'
+
+setup(
+ name=package_name,
+ version='0.0.0',
+ packages=find_packages(exclude=['test']),
+ data_files=[
+ ('share/ament_index/resource_index/packages',
+ ['resource/' + package_name]),
+ ('share/' + package_name, ['package.xml']),
+ ],
+ install_requires=['setuptools'],
+ zip_safe=True,
+ maintainer='david',
+ maintainer_email='david.farah@epfl.ch',
+ description='TODO: Package description',
+ license='TODO: License declaration',
+ tests_require=['pytest'],
+ entry_points={
+ 'console_scripts': [
+ 'pointcloud_filter_node = pointcloud_proc.tester:main',
+ ],
+ },
+)
diff --git a/obstacle_detection/lidar/pointcloud_proc/test/test_copyright.py b/obstacle_detection/lidar/pointcloud_proc/test/test_copyright.py
new file mode 100644
index 0000000..97a3919
--- /dev/null
+++ b/obstacle_detection/lidar/pointcloud_proc/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found errors'
diff --git a/obstacle_detection/lidar/pointcloud_proc/test/test_flake8.py b/obstacle_detection/lidar/pointcloud_proc/test/test_flake8.py
new file mode 100644
index 0000000..27ee107
--- /dev/null
+++ b/obstacle_detection/lidar/pointcloud_proc/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+ rc, errors = main_with_errors(argv=[])
+ assert rc == 0, \
+ 'Found %d code style errors / warnings:\n' % len(errors) + \
+ '\n'.join(errors)
diff --git a/obstacle_detection/lidar/pointcloud_proc/test/test_pep257.py b/obstacle_detection/lidar/pointcloud_proc/test/test_pep257.py
new file mode 100644
index 0000000..b234a38
--- /dev/null
+++ b/obstacle_detection/lidar/pointcloud_proc/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+ rc = main(argv=['.', 'test'])
+ assert rc == 0, 'Found code style errors / warnings'
diff --git a/sensors/camera/CMakeLists.txt b/sensors/camera/CMakeLists.txt
new file mode 100644
index 0000000..e0a8741
--- /dev/null
+++ b/sensors/camera/CMakeLists.txt
@@ -0,0 +1,89 @@
+cmake_minimum_required(VERSION 3.5)
+project(front_camera)
+
+# Default to C99
+if(NOT CMAKE_C_STANDARD)
+ set(CMAKE_C_STANDARD 99)
+endif()
+
+# Default to C++14
+if(NOT CMAKE_CXX_STANDARD)
+ set(CMAKE_CXX_STANDARD 14)
+endif()
+
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ add_compile_options(-Wall -Wextra -Wpedantic)
+endif()
+
+# find dependencies
+find_package(camera_info_manager REQUIRED)
+find_package(cv_bridge REQUIRED)
+find_package(OpenCV REQUIRED)
+find_package(depthai CONFIG REQUIRED)
+# find_package(depthai_ros_msgs REQUIRED)
+find_package(depthai_bridge REQUIRED)
+find_package(rclcpp REQUIRED)
+find_package(sensor_msgs REQUIRED)
+find_package(pcl_conversions REQUIRED)
+find_package(PCL REQUIRED)
+find_package(stereo_msgs REQUIRED)
+
+find_package(std_msgs REQUIRED)
+# find_package(vision_msgs REQUIRED)
+# find_package(depth_image_proc REQUIRED)
+
+set(dependencies
+ camera_info_manager
+ depthai_bridge
+ depthai
+ rclcpp
+ sensor_msgs
+ stereo_msgs
+ std_msgs
+ pcl_conversions
+ PCL
+ OpenCV
+)
+
+include_directories(
+ include ${ament_INCLUDE_DIRS}
+ ${OpenCV_INCLUDE_DIRS}
+ ${cv_bridge_INCLUDE_DIRS}
+)
+
+
+# include_directories(
+# ../depth_image_proc/include
+# )
+
+
+
+
+if(BUILD_TESTING)
+ find_package(ament_lint_auto REQUIRED)
+ # the following line skips the linter which checks for copyrights
+ # uncomment the line when a copyright and license is not present in all source files
+ #set(ament_cmake_copyright_FOUND TRUE)
+ # the following line skips cpplint (only works in a git repo)
+ # uncomment the line when this package is not in a git repo
+ #set(ament_cmake_cpplint_FOUND TRUE)
+ ament_lint_auto_find_test_dependencies()
+endif()
+
+install(DIRECTORY launch/ DESTINATION share/${PROJECT_NAME}/launch
+FILES_MATCHING PATTERN "*.py")
+install(DIRECTORY launch/ DESTINATION share/${PROJECT_NAME}/launch
+FILES_MATCHING PATTERN "*.xml")
+
+
+# add_executable(front_camera_filter src/front_camera_filter.cpp)
+# ament_target_dependencies(front_camera_filter rclcpp sensor_msgs pcl_conversions PCL cv_bridge OpenCV)
+
+add_executable(front_camera_publisher src/stereo_publisher.cpp)
+ament_target_dependencies(front_camera_publisher rclcpp std_msgs sensor_msgs stereo_msgs camera_info_manager depthai depthai_bridge)
+
+install(TARGETS
+ front_camera_publisher
+ DESTINATION lib/${PROJECT_NAME})
+
+ament_package()
diff --git a/sensors/camera/client_id b/sensors/camera/client_id
new file mode 100644
index 0000000..651c16f
--- /dev/null
+++ b/sensors/camera/client_id
@@ -0,0 +1 @@
+7110fea2-9170-4bc1-9a30-875db238c1e2
\ No newline at end of file
diff --git a/sensors/camera/launch/obstacle_pc.launch.xml b/sensors/camera/launch/obstacle_pc.launch.xml
new file mode 100644
index 0000000..8066c51
--- /dev/null
+++ b/sensors/camera/launch/obstacle_pc.launch.xml
@@ -0,0 +1,11 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/sensors/camera/launch/stereo.launch.py b/sensors/camera/launch/stereo.launch.py
new file mode 100644
index 0000000..0296ba6
--- /dev/null
+++ b/sensors/camera/launch/stereo.launch.py
@@ -0,0 +1,221 @@
+import os
+
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription, launch_description_sources
+from launch.actions import IncludeLaunchDescription
+from launch.actions import DeclareLaunchArgument
+from launch.substitutions import LaunchConfiguration
+import launch_ros.actions
+import launch_ros.descriptions
+
+
+def generate_launch_description():
+ default_rviz = os.path.join(get_package_share_directory('depthai_examples'),
+ 'rviz', 'stereoPointCloud.rviz')
+ urdf_launch_dir = os.path.join(get_package_share_directory('depthai_descriptions'), 'launch')
+
+
+ camera_model = LaunchConfiguration('camera_model', default = 'OAK-D')
+ tf_prefix = LaunchConfiguration('tf_prefix', default = 'oak')
+ base_frame = LaunchConfiguration('base_frame', default = 'oak-d_frame')
+ parent_frame = LaunchConfiguration('parent_frame', default = 'oak-d-base-frame')
+
+ cam_pos_x = LaunchConfiguration('cam_pos_x', default = '0.0')
+ cam_pos_y = LaunchConfiguration('cam_pos_y', default = '0.0')
+ cam_pos_z = LaunchConfiguration('cam_pos_z', default = '0.0')
+ cam_roll = LaunchConfiguration('cam_roll', default = '0.0')
+ cam_pitch = LaunchConfiguration('cam_pitch', default = '0.0')
+ cam_yaw = LaunchConfiguration('cam_yaw', default = '0.0')
+
+ mode = LaunchConfiguration('mode', default = 'depth')
+ lrcheck = LaunchConfiguration('lrcheck', default = True)
+ extended = LaunchConfiguration('extended', default = False)
+ subpixel = LaunchConfiguration('subpixel', default = True)
+ confidence = LaunchConfiguration('confidence', default = 200)
+ LRchecktresh = LaunchConfiguration('LRchecktresh', default = 5)
+ monoResolution = LaunchConfiguration('monoResolution', default = '720p')
+
+ declare_camera_model_cmd = DeclareLaunchArgument(
+ 'camera_model',
+ default_value=camera_model,
+ description='The model of the camera. Using a wrong camera model can disable camera features. Valid models: `OAK-D, OAK-D-LITE`.')
+
+ declare_tf_prefix_cmd = DeclareLaunchArgument(
+ 'tf_prefix',
+ default_value=tf_prefix,
+ description='The name of the camera. It can be different from the camera model and it will be used in naming TF.')
+
+ declare_base_frame_cmd = DeclareLaunchArgument(
+ 'base_frame',
+ default_value=base_frame,
+ description='Name of the base link.')
+
+ declare_parent_frame_cmd = DeclareLaunchArgument(
+ 'parent_frame',
+ default_value=parent_frame,
+        description='Name of the parent link, e.g. from a robot TF, that the base of the OAK can be connected to.')
+
+ declare_pos_x_cmd = DeclareLaunchArgument(
+ 'cam_pos_x',
+ default_value=cam_pos_x,
+ description='Position X of the camera with respect to the base frame.')
+
+ declare_pos_y_cmd = DeclareLaunchArgument(
+ 'cam_pos_y',
+ default_value=cam_pos_y,
+ description='Position Y of the camera with respect to the base frame.')
+
+ declare_pos_z_cmd = DeclareLaunchArgument(
+ 'cam_pos_z',
+ default_value=cam_pos_z,
+ description='Position Z of the camera with respect to the base frame.')
+
+ declare_roll_cmd = DeclareLaunchArgument(
+ 'cam_roll',
+ default_value=cam_roll,
+ description='Roll orientation of the camera with respect to the base frame.')
+
+ declare_pitch_cmd = DeclareLaunchArgument(
+ 'cam_pitch',
+ default_value=cam_pitch,
+ description='Pitch orientation of the camera with respect to the base frame.')
+
+ declare_yaw_cmd = DeclareLaunchArgument(
+ 'cam_yaw',
+ default_value=cam_yaw,
+ description='Yaw orientation of the camera with respect to the base frame.')
+
+ declare_mode_cmd = DeclareLaunchArgument(
+ 'mode',
+ default_value=mode,
+        description='Set to depth or disparity. When set to depth, the node publishes depth images; otherwise it publishes disparity.')
+
+    declare_lrcheck_cmd = DeclareLaunchArgument(
+        'lrcheck',
+        default_value=lrcheck,
+        description='Enable the left-right check to better handle occlusions in stereo matching.')
+
+    declare_extended_cmd = DeclareLaunchArgument(
+        'extended',
+        default_value=extended,
+        description='Enable extended disparity, which allows a closer minimum depth.')
+
+    declare_subpixel_cmd = DeclareLaunchArgument(
+        'subpixel',
+        default_value=subpixel,
+        description='Enable subpixel mode for finer depth precision.')
+
+    declare_confidence_cmd = DeclareLaunchArgument(
+        'confidence',
+        default_value=confidence,
+        description='Confidence threshold for the stereo disparity calculation (0-255).')
+
+    declare_LRchecktresh_cmd = DeclareLaunchArgument(
+        'LRchecktresh',
+        default_value=LRchecktresh,
+        description='Threshold (in disparity pixels) used by the left-right check.')
+
+ declare_monoResolution_cmd = DeclareLaunchArgument(
+ 'monoResolution',
+ default_value=monoResolution,
+ description='Contains the resolution of the Mono Cameras. Available resolutions are 800p, 720p & 400p for OAK-D & 480p for OAK-D-Lite.')
+
+ urdf_launch = IncludeLaunchDescription(
+ launch_description_sources.PythonLaunchDescriptionSource(
+ os.path.join(urdf_launch_dir, 'urdf_launch.py')),
+ launch_arguments={'tf_prefix' : tf_prefix,
+ 'camera_model': camera_model,
+ 'base_frame' : base_frame,
+ 'parent_frame': parent_frame,
+ 'cam_pos_x' : cam_pos_x,
+ 'cam_pos_y' : cam_pos_y,
+ 'cam_pos_z' : cam_pos_z,
+ 'cam_roll' : cam_roll,
+ 'cam_pitch' : cam_pitch,
+ 'cam_yaw' : cam_yaw}.items())
+
+ stereo_node = launch_ros.actions.Node(
+ package='depthai_examples', executable='stereo_node',
+ output='screen',
+ parameters=[{'tf_prefix': tf_prefix},
+ {'mode': mode},
+ {'lrcheck': lrcheck},
+ {'extended': extended},
+ {'subpixel': subpixel},
+ {'confidence': confidence},
+ {'LRchecktresh': LRchecktresh},
+ {'monoResolution': monoResolution}])
+
+
+ metric_converter_node = launch_ros.actions.ComposableNodeContainer(
+ name='container',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::ConvertMetricNode',
+ name='convert_metric_node',
+ remappings=[('image_raw', '/stereo/depth'),
+ ('camera_info', '/stereo/camera_info'),
+ ('image', '/stereo/converted_depth')]
+ ),
+ ],
+ output='screen',)
+
+ point_cloud_node = launch_ros.actions.ComposableNodeContainer(
+ name='container2',
+ namespace='',
+ package='rclcpp_components',
+ executable='component_container',
+ composable_node_descriptions=[
+ # Driver itself
+ launch_ros.descriptions.ComposableNode(
+ package='depth_image_proc',
+ plugin='depth_image_proc::PointCloudXyziNode',
+ name='point_cloud_xyzi',
+
+ remappings=[('depth/image_rect', '/stereo/converted_depth'),
+ ('intensity/image_rect', '/right/image_rect'),
+ ('intensity/camera_info', '/right/camera_info'),
+ ('points', '/stereo/points')]
+ ),
+ ],
+ output='screen',)
+
+ rviz_node = launch_ros.actions.Node(
+ package='rviz2', executable='rviz2', output='screen',
+ arguments=['--display-config', default_rviz])
+
+ ld = LaunchDescription()
+ ld.add_action(declare_tf_prefix_cmd)
+ ld.add_action(declare_camera_model_cmd)
+
+ ld.add_action(declare_base_frame_cmd)
+ ld.add_action(declare_parent_frame_cmd)
+
+ ld.add_action(declare_pos_x_cmd)
+ ld.add_action(declare_pos_y_cmd)
+ ld.add_action(declare_pos_z_cmd)
+ ld.add_action(declare_roll_cmd)
+ ld.add_action(declare_pitch_cmd)
+ ld.add_action(declare_yaw_cmd)
+
+ ld.add_action(declare_mode_cmd)
+ ld.add_action(declare_lrcheck_cmd)
+ ld.add_action(declare_extended_cmd)
+ ld.add_action(declare_subpixel_cmd)
+ ld.add_action(declare_confidence_cmd)
+ ld.add_action(declare_LRchecktresh_cmd)
+ ld.add_action(declare_monoResolution_cmd)
+
+ ld.add_action(stereo_node)
+ ld.add_action(urdf_launch)
+
+ ld.add_action(metric_converter_node)
+ ld.add_action(point_cloud_node)
+ ld.add_action(rviz_node)
+ return ld
+
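+# Example (sketch; package and file names assumed from this repo's CMake install rules):
+#   ros2 launch front_camera stereo.launch.py monoResolution:=400p tf_prefix:=oak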
diff --git a/sensors/camera/obstacles.rviz b/sensors/camera/obstacles.rviz
new file mode 100644
index 0000000..d082f34
--- /dev/null
+++ b/sensors/camera/obstacles.rviz
@@ -0,0 +1,226 @@
+Panels:
+ - Class: rviz_common/Displays
+ Help Height: 78
+ Name: Displays
+ Property Tree Widget:
+ Expanded:
+ - /Global Options1
+ - /Status1
+ - /PointCloud21
+ - /PointCloud21/Topic1
+ - /Image1
+ - /Image1/Topic1
+ - /TF1
+ - /TF1/Frames1
+ Splitter Ratio: 0.5
+ Tree Height: 566
+ - Class: rviz_common/Selection
+ Name: Selection
+ - Class: rviz_common/Tool Properties
+ Expanded:
+ - /2D Goal Pose1
+ - /Publish Point1
+ Name: Tool Properties
+ Splitter Ratio: 0.5886790156364441
+ - Class: rviz_common/Views
+ Expanded:
+ - /Current View1
+ Name: Views
+ Splitter Ratio: 0.5
+Visualization Manager:
+ Class: ""
+ Displays:
+ - Alpha: 0.5
+ Cell Size: 1
+ Class: rviz_default_plugins/Grid
+ Color: 160; 160; 164
+ Enabled: true
+ Line Style:
+ Line Width: 0.029999999329447746
+ Value: Lines
+ Name: Grid
+ Normal Cell Count: 0
+ Offset:
+ X: 0
+ Y: 0
+ Z: 0
+ Plane: XY
+ Plane Cell Count: 10
+ Reference Frame:
+ Value: true
+ - Alpha: 1
+ Autocompute Intensity Bounds: true
+ Autocompute Value Bounds:
+ Max Value: 3.147482395172119
+ Min Value: 0
+ Value: true
+ Axis: X
+ Channel Name: intensity
+ Class: rviz_default_plugins/PointCloud2
+ Color: 255; 255; 255
+ Color Transformer: AxisColor
+ Decay Time: 0
+ Enabled: true
+ Invert Rainbow: false
+ Max Color: 255; 255; 255
+ Max Intensity: 4096
+ Min Color: 0; 0; 0
+ Min Intensity: 0
+ Name: PointCloud2
+ Position Transformer: XYZ
+ Selectable: true
+ Size (Pixels): 3
+ Size (m): 0.009999999776482582
+ Style: Flat Squares
+ Topic:
+ Depth: 5
+ Durability Policy: Volatile
+ History Policy: Keep Last
+ Reliability Policy: Best Effort
+ Value: /obstacles_points
+ Use Fixed Frame: true
+ Use rainbow: true
+ Value: true
+ - Class: rviz_default_plugins/Image
+ Enabled: true
+ Max Value: 1
+ Median window: 5
+ Min Value: 0
+ Name: Image
+ Normalize Range: true
+ Topic:
+ Depth: 5
+ Durability Policy: Volatile
+ History Policy: Keep Last
+ Reliability Policy: Reliable
+ Value: /right/image_rect
+ Value: true
+ - Class: rviz_default_plugins/TF
+ Enabled: true
+ Frame Timeout: 15
+ Frames:
+ All Enabled: false
+ oak-d-base-frame:
+ Value: true
+ oak-d_frame:
+ Value: true
+ oak_imu_frame:
+ Value: false
+ oak_left_camera_frame:
+ Value: false
+ oak_left_camera_optical_frame:
+ Value: false
+ oak_model_origin:
+ Value: false
+ oak_rgb_camera_frame:
+ Value: false
+ oak_rgb_camera_optical_frame:
+ Value: false
+ oak_right_camera_frame:
+ Value: false
+ oak_right_camera_optical_frame:
+ Value: false
+ Marker Scale: 1
+ Name: TF
+ Show Arrows: true
+ Show Axes: true
+ Show Names: false
+ Tree:
+ oak-d-base-frame:
+ oak-d_frame:
+ oak_imu_frame:
+ {}
+ oak_left_camera_frame:
+ oak_left_camera_optical_frame:
+ {}
+ oak_model_origin:
+ {}
+ oak_rgb_camera_frame:
+ oak_rgb_camera_optical_frame:
+ {}
+ oak_right_camera_frame:
+ oak_right_camera_optical_frame:
+ {}
+ Update Interval: 0
+ Value: true
+ Enabled: true
+ Global Options:
+ Background Color: 48; 48; 48
+ Fixed Frame: oak-d-base-frame
+ Frame Rate: 30
+ Name: root
+ Tools:
+ - Class: rviz_default_plugins/Interact
+ Hide Inactive Objects: true
+ - Class: rviz_default_plugins/MoveCamera
+ - Class: rviz_default_plugins/Select
+ - Class: rviz_default_plugins/FocusCamera
+ - Class: rviz_default_plugins/Measure
+ Line color: 128; 128; 0
+ - Class: rviz_default_plugins/SetInitialPose
+ Topic:
+ Depth: 5
+ Durability Policy: Volatile
+ History Policy: Keep Last
+ Reliability Policy: Reliable
+ Value: /initialpose
+ - Class: rviz_default_plugins/SetGoal
+ Topic:
+ Depth: 5
+ Durability Policy: Volatile
+ History Policy: Keep Last
+ Reliability Policy: Reliable
+ Value: /goal_pose
+ - Class: rviz_default_plugins/PublishPoint
+ Single click: true
+ Topic:
+ Depth: 5
+ Durability Policy: Volatile
+ History Policy: Keep Last
+ Reliability Policy: Reliable
+ Value: /clicked_point
+ Transformation:
+ Current:
+ Class: rviz_default_plugins/TF
+ Value: true
+ Views:
+ Current:
+ Class: rviz_default_plugins/Orbit
+ Distance: 3.6460747718811035
+ Enable Stereo Rendering:
+ Stereo Eye Separation: 0.05999999865889549
+ Stereo Focal Distance: 1
+ Swap Stereo Eyes: false
+ Value: false
+ Focal Point:
+ X: 1.027349591255188
+ Y: 0.49573948979377747
+ Z: 0.07529568672180176
+ Focal Shape Fixed Size: true
+ Focal Shape Size: 0.05000000074505806
+ Invert Z Axis: false
+ Name: Current View
+ Near Clip Distance: 0.009999999776482582
+ Pitch: 0.3303980827331543
+ Target Frame:
+ Value: Orbit (rviz)
+ Yaw: 4.235406398773193
+ Saved: ~
+Window Geometry:
+ Displays:
+ collapsed: false
+ Height: 997
+ Hide Left Dock: false
+ Hide Right Dock: false
+ Image:
+ collapsed: false
+ QMainWindow State: 000000ff00000000fd0000000400000000000001560000038ffc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000002bf000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650100000300000000ca0000002800ffffff000000010000010f0000038ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003b0000038f000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d00650100000000000004500000000000000000000005030000038f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+ Selection:
+ collapsed: false
+ Tool Properties:
+ collapsed: false
+ Views:
+ collapsed: false
+ Width: 1908
+ X: 0
+ Y: 0
diff --git a/sensors/camera/package.xml b/sensors/camera/package.xml
new file mode 100644
index 0000000..7c1509f
--- /dev/null
+++ b/sensors/camera/package.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>front_camera</name>
+  <version>0.0.0</version>
+  <description>Front camera node</description>
+  <maintainer email="anoca@todo.todo">anoca</maintainer>
+  <license>JAJ</license>
+
+  <buildtool_depend>ament_cmake</buildtool_depend>
+
+  <test_depend>ament_lint_auto</test_depend>
+  <test_depend>ament_lint_common</test_depend>
+
+  <depend>camera_info_manager</depend>
+  <depend>depthai_bridge</depend>
+  <depend>depthai</depend>
+  <depend>rclcpp</depend>
+  <depend>sensor_msgs</depend>
+  <depend>stereo_msgs</depend>
+  <depend>std_msgs</depend>
+  <depend>cv_bridge</depend>
+  <depend>opencv2</depend>
+  <depend>pcl_conversions</depend>
+  <depend>PCL</depend>
+
+  <export>
+    <build_type>ament_cmake</build_type>
+  </export>
+</package>
diff --git a/sensors/camera/src/stereo_publisher.cpp b/sensors/camera/src/stereo_publisher.cpp
new file mode 100644
index 0000000..73be3fa
--- /dev/null
+++ b/sensors/camera/src/stereo_publisher.cpp
@@ -0,0 +1,205 @@
+#include <cstdio>
+#include <functional>
+#include <iostream>
+#include <tuple>
+
+#include "camera_info_manager/camera_info_manager.hpp"
+#include "rclcpp/executors.hpp"
+#include "rclcpp/node.hpp"
+#include "sensor_msgs/msg/image.hpp"
+#include "stereo_msgs/msg/disparity_image.hpp"
+
+// Includes common necessary includes for development using the depthai library
+#include "depthai/device/DataQueue.hpp"
+#include "depthai/device/Device.hpp"
+#include "depthai/pipeline/Pipeline.hpp"
+#include "depthai/pipeline/node/MonoCamera.hpp"
+#include "depthai/pipeline/node/StereoDepth.hpp"
+#include "depthai/pipeline/node/XLinkOut.hpp"
+#include "depthai_bridge/BridgePublisher.hpp"
+#include "depthai_bridge/DisparityConverter.hpp"
+#include "depthai_bridge/ImageConverter.hpp"
+
+std::tuple<dai::Pipeline, int, int> createPipeline(
+ bool withDepth, bool lrcheck, bool extended, bool subpixel, int confidence, int LRchecktresh, std::string resolution) {
+ dai::Pipeline pipeline;
+ dai::node::MonoCamera::Properties::SensorResolution monoResolution;
+    auto monoLeft = pipeline.create<dai::node::MonoCamera>();
+    auto monoRight = pipeline.create<dai::node::MonoCamera>();
+    auto xoutLeft = pipeline.create<dai::node::XLinkOut>();
+    auto xoutRight = pipeline.create<dai::node::XLinkOut>();
+    auto stereo = pipeline.create<dai::node::StereoDepth>();
+    auto xoutDepth = pipeline.create<dai::node::XLinkOut>();
+
+ // XLinkOut
+ xoutLeft->setStreamName("left");
+ xoutRight->setStreamName("right");
+
+ if(withDepth) {
+ xoutDepth->setStreamName("depth");
+ } else {
+ xoutDepth->setStreamName("disparity");
+ }
+
+ int width, height;
+ if(resolution == "720p") {
+ monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_720_P;
+ width = 1280;
+ height = 720;
+ } else if(resolution == "400p") {
+ monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_400_P;
+ width = 640;
+ height = 400;
+ } else if(resolution == "800p") {
+ monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_800_P;
+ width = 1280;
+ height = 800;
+ } else if(resolution == "480p") {
+ monoResolution = dai::node::MonoCamera::Properties::SensorResolution::THE_480_P;
+ width = 640;
+ height = 480;
+ } else {
+ RCLCPP_ERROR(rclcpp::get_logger("rclcpp"), "Invalid parameter. -> monoResolution: %s", resolution.c_str());
+ throw std::runtime_error("Invalid mono camera resolution.");
+ }
+
+ // MonoCamera
+ monoLeft->setResolution(monoResolution);
+ monoLeft->setBoardSocket(dai::CameraBoardSocket::CAM_B);
+ monoRight->setResolution(monoResolution);
+ monoRight->setBoardSocket(dai::CameraBoardSocket::CAM_C);
+
+ // StereoDepth
+ stereo->initialConfig.setConfidenceThreshold(confidence);
+ stereo->setRectifyEdgeFillColor(0); // black, to better see the cutout
+ stereo->initialConfig.setLeftRightCheckThreshold(LRchecktresh);
+ stereo->setLeftRightCheck(lrcheck);
+ stereo->setExtendedDisparity(extended);
+ stereo->setSubpixel(subpixel);
+
+ // Link plugins CAM -> STEREO -> XLINK
+ monoLeft->out.link(stereo->left);
+ monoRight->out.link(stereo->right);
+
+ stereo->rectifiedLeft.link(xoutLeft->input);
+ stereo->rectifiedRight.link(xoutRight->input);
+
+ if(withDepth) {
+ stereo->depth.link(xoutDepth->input);
+ } else {
+ stereo->disparity.link(xoutDepth->input);
+ }
+
+ return std::make_tuple(pipeline, width, height);
+}
+
+int main(int argc, char** argv) {
+ rclcpp::init(argc, argv);
+ auto node = rclcpp::Node::make_shared("stereo_node");
+
+ std::string tfPrefix, mode, monoResolution;
+ bool lrcheck, extended, subpixel, enableDepth;
+ int confidence, LRchecktresh;
+ int monoWidth, monoHeight;
+ dai::Pipeline pipeline;
+
+ node->declare_parameter("tf_prefix", "oak");
+ node->declare_parameter("mode", "depth");
+ node->declare_parameter("lrcheck", true);
+ node->declare_parameter("extended", false);
+ node->declare_parameter("subpixel", true);
+ node->declare_parameter("confidence", 200);
+ node->declare_parameter("LRchecktresh", 5);
+ node->declare_parameter("monoResolution", "720p");
+
+ node->get_parameter("tf_prefix", tfPrefix);
+ node->get_parameter("mode", mode);
+ node->get_parameter("lrcheck", lrcheck);
+ node->get_parameter("extended", extended);
+ node->get_parameter("subpixel", subpixel);
+ node->get_parameter("confidence", confidence);
+ node->get_parameter("LRchecktresh", LRchecktresh);
+ node->get_parameter("monoResolution", monoResolution);
+
+ if(mode == "depth") {
+ enableDepth = true;
+ } else {
+ enableDepth = false;
+ }
+
+ std::tie(pipeline, monoWidth, monoHeight) = createPipeline(enableDepth, lrcheck, extended, subpixel, confidence, LRchecktresh, monoResolution);
+ dai::Device device(pipeline);
+ auto leftQueue = device.getOutputQueue("left", 30, false);
+ auto rightQueue = device.getOutputQueue("right", 30, false);
+    std::shared_ptr<dai::DataOutputQueue> stereoQueue;
+ if(enableDepth) {
+ stereoQueue = device.getOutputQueue("depth", 30, false);
+ } else {
+ stereoQueue = device.getOutputQueue("disparity", 30, false);
+ }
+
+ auto calibrationHandler = device.readCalibration();
+
+ auto boardName = calibrationHandler.getEepromData().boardName;
+ if(monoHeight > 480 && boardName == "OAK-D-LITE") {
+ monoWidth = 640;
+ monoHeight = 480;
+ }
+
+ dai::rosBridge::ImageConverter converter(tfPrefix + "_left_camera_optical_frame", true);
+ auto leftCameraInfo = converter.calibrationToCameraInfo(calibrationHandler, dai::CameraBoardSocket::CAM_B, monoWidth, monoHeight);
+    dai::rosBridge::BridgePublisher<sensor_msgs::msg::Image, dai::ImgFrame> leftPublish(
+ leftQueue,
+ node,
+ std::string("left/image_rect"),
+ std::bind(&dai::rosBridge::ImageConverter::toRosMsg, &converter, std::placeholders::_1, std::placeholders::_2),
+ 30,
+ leftCameraInfo,
+ "left");
+
+ leftPublish.addPublisherCallback();
+
+ dai::rosBridge::ImageConverter rightconverter(tfPrefix + "_right_camera_optical_frame", true);
+ auto rightCameraInfo = converter.calibrationToCameraInfo(calibrationHandler, dai::CameraBoardSocket::CAM_C, monoWidth, monoHeight);
+
+    dai::rosBridge::BridgePublisher<sensor_msgs::msg::Image, dai::ImgFrame> rightPublish(
+ rightQueue,
+ node,
+ std::string("right/image_rect"),
+ std::bind(&dai::rosBridge::ImageConverter::toRosMsg, &rightconverter, std::placeholders::_1, std::placeholders::_2),
+ 30,
+ rightCameraInfo,
+ "right");
+
+ rightPublish.addPublisherCallback();
+
+ if(mode == "depth") {
+        dai::rosBridge::BridgePublisher<sensor_msgs::msg::Image, dai::ImgFrame> depthPublish(
+ stereoQueue,
+ node,
+ std::string("stereo/depth"),
+ std::bind(&dai::rosBridge::ImageConverter::toRosMsg,
+ &rightconverter, // since the converter has the same frame name
+ // and image type is also same we can reuse it
+ std::placeholders::_1,
+ std::placeholders::_2),
+ 30,
+ rightCameraInfo,
+ "stereo");
+ depthPublish.addPublisherCallback();
+ rclcpp::spin(node);
+ } else {
+ dai::rosBridge::DisparityConverter dispConverter(tfPrefix + "_right_camera_optical_frame", 880, 7.5, 20, 2000);
+        dai::rosBridge::BridgePublisher<stereo_msgs::msg::DisparityImage, dai::ImgFrame> dispPublish(
+ stereoQueue,
+ node,
+ std::string("stereo/disparity"),
+ std::bind(&dai::rosBridge::DisparityConverter::toRosMsg, &dispConverter, std::placeholders::_1, std::placeholders::_2),
+ 30,
+ rightCameraInfo,
+ "stereo");
+ dispPublish.addPublisherCallback();
+ rclcpp::spin(node);
+ }
+ return 0;
+}
diff --git a/sensors/lidar/lidar_filter/CMakeLists.txt b/sensors/lidar/lidar_filter/CMakeLists.txt
new file mode 100644
index 0000000..dd56019
--- /dev/null
+++ b/sensors/lidar/lidar_filter/CMakeLists.txt
@@ -0,0 +1,84 @@
+cmake_minimum_required(VERSION 3.5)
+project(lidar_filter)
+
+# Default to C99
+if(NOT CMAKE_C_STANDARD)
+ set(CMAKE_C_STANDARD 99)
+endif()
+
+# Default to C++14
+if(NOT CMAKE_CXX_STANDARD)
+ set(CMAKE_CXX_STANDARD 14)
+endif()
+
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ add_compile_options(-Wall -Wextra -Wpedantic)
+endif()
+
+# find dependencies
+find_package(rclcpp REQUIRED)
+find_package(std_msgs REQUIRED)
+find_package(sensor_msgs REQUIRED)
+find_package(pcl_conversions REQUIRED)
+find_package(PCL REQUIRED)
+# find_package(stereo_msgs REQUIRED)
+
+# find_package(std_msgs REQUIRED)
+
+set(dependencies
+ rclcpp
+ std_msgs
+ sensor_msgs
+ pcl_conversions
+ PCL
+ # OpenCV
+)
+
+include_directories(
+ include ${ament_INCLUDE_DIRS}
+ include ${PCL_INCLUDE_DIRS}
+ # ${OpenCV_INCLUDE_DIRS}
+ # ${cv_bridge_INCLUDE_DIRS}
+)
+
+link_directories(
+ ${PCL_LIBRARY_DIRS}
+)
+
+add_definitions(${PCL_DEFINITIONS})
+
+if(BUILD_TESTING)
+ find_package(ament_lint_auto REQUIRED)
+ # the following line skips the linter which checks for copyrights
+ # uncomment the line when a copyright and license is not present in all source files
+ #set(ament_cmake_copyright_FOUND TRUE)
+ # the following line skips cpplint (only works in a git repo)
+ # uncomment the line when this package is not in a git repo
+ #set(ament_cmake_cpplint_FOUND TRUE)
+ ament_lint_auto_find_test_dependencies()
+endif()
+
+install(DIRECTORY launch/ DESTINATION share/${PROJECT_NAME}/launch
+FILES_MATCHING PATTERN "*.py")
+install(DIRECTORY launch/ DESTINATION share/${PROJECT_NAME}/launch
+FILES_MATCHING PATTERN "*.xml")
+
+
+# add_executable(front_camera_filter src/front_camera_filter.cpp)
+# ament_target_dependencies(front_camera_filter rclcpp sensor_msgs pcl_conversions PCL cv_bridge OpenCV)
+
+add_executable(NAV_lidar_filter_node src/lidar_filter.cpp)
+ament_target_dependencies(NAV_lidar_filter_node rclcpp std_msgs sensor_msgs PCL pcl_conversions)
+
+target_link_libraries(NAV_lidar_filter_node
+ ${ament_LIBRARIES}
+ ${PCL_LIBRARIES}
+)
+
+install(TARGETS
+ NAV_lidar_filter_node
+ DESTINATION lib/${PROJECT_NAME})
+
+# ament_export_include_directories(include)
+
+ament_package()
diff --git a/sensors/lidar/lidar_filter/launch/lidar_filter.launch.xml b/sensors/lidar/lidar_filter/launch/lidar_filter.launch.xml
new file mode 100644
index 0000000..33cf743
--- /dev/null
+++ b/sensors/lidar/lidar_filter/launch/lidar_filter.launch.xml
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/sensors/lidar/lidar_filter/package.xml b/sensors/lidar/lidar_filter/package.xml
new file mode 100644
index 0000000..ca51a03
--- /dev/null
+++ b/sensors/lidar/lidar_filter/package.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>lidar_filter</name>
+  <version>0.0.0</version>
+  <description>Lidar filter node</description>
+  <maintainer email="anoca@todo.todo">anoca</maintainer>
+  <license>JAJ</license>
+
+  <buildtool_depend>ament_cmake</buildtool_depend>
+
+  <test_depend>ament_lint_auto</test_depend>
+  <test_depend>ament_lint_common</test_depend>
+
+  <depend>sensor_msgs</depend>
+  <depend>std_msgs</depend>
+  <depend>rclcpp</depend>
+  <depend>pcl_conversions</depend>
+  <depend>PCL</depend>
+
+  <export>
+    <build_type>ament_cmake</build_type>
+  </export>
+</package>
diff --git a/sensors/lidar/lidar_filter/src/lidar_filter.cpp b/sensors/lidar/lidar_filter/src/lidar_filter.cpp
new file mode 100644
index 0000000..21f251e
--- /dev/null
+++ b/sensors/lidar/lidar_filter/src/lidar_filter.cpp
@@ -0,0 +1,177 @@
+// ROS2 files for the lidar_filter node
+// using pcl library
+#include "rclcpp/rclcpp.hpp"
+#include "pcl_conversions/pcl_conversions.h"
+#include "pcl/point_cloud.h"
+#include "sensor_msgs/msg/point_cloud2.hpp"
+#include "std_msgs/msg/string.hpp"
+#include <cmath>
+#include <pcl/point_types.h>
+#include <pcl/filters/voxel_grid.h>
+
+#include <pcl/features/normal_3d_omp.h>
+#include <pcl/search/kdtree.h>
+
+rmw_qos_profile_t profile = {
+ RMW_QOS_POLICY_HISTORY_KEEP_LAST,
+ 5,
+ RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT,
+ RMW_QOS_POLICY_DURABILITY_VOLATILE,
+ RMW_QOS_DEADLINE_DEFAULT,
+ RMW_QOS_LIFESPAN_DEFAULT,
+ RMW_QOS_POLICY_LIVELINESS_AUTOMATIC,
+ RMW_QOS_LIVELINESS_LEASE_DURATION_DEFAULT,
+ false
+};
+
+auto qos = rclcpp::QoS(
+ rclcpp::QoSInitialization(
+ profile.history,
+ profile.depth
+ ),
+ profile);
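+// Note: a shorter, roughly equivalent setup (sketch) would be rclcpp's built-in
+// sensor-data profile: auto qos = rclcpp::SensorDataQoS().keep_last(5);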
+
+class LidarFilterNode : public rclcpp::Node
+{
+ private:
+
+ double min_dist; // minimum distance of points to keep (in meters)
+ double slope_threshold; // slope threshold for the floor (in percentage)
+ bool filter_bool; // whether to filter or not
+
+ protected:
+
+ // Publisher
+    rclcpp::Publisher<sensor_msgs::msg::PointCloud2>::SharedPtr pub_;
+    rclcpp::Publisher<sensor_msgs::msg::PointCloud2>::SharedPtr pub_floor;
+
+ // Subscriber
+    rclcpp::Subscription<sensor_msgs::msg::PointCloud2>::SharedPtr sub_;
+    rclcpp::Subscription<std_msgs::msg::String>::SharedPtr destroy_sub_;
+
+ public:
+ LidarFilterNode() : Node("NAV_lidar_filter_node")
+ {
+ RCLCPP_INFO(this->get_logger(), "Lidar Filter Node Started");
+
+ this->declare_parameter("min_distance", 0.9);
+ this->declare_parameter("slope_threshold", 0.7); //100% is 45 degrees to horizontal
+ this->declare_parameter("filter", true);
+
+ min_dist = this->get_parameter("min_distance").as_double();
+ slope_threshold = this->get_parameter("slope_threshold").as_double();
+ filter_bool = this->get_parameter("filter").as_bool();
+
+ // Create a publisher
+        pub_ = this->create_publisher<sensor_msgs::msg::PointCloud2>("/lidar_filter/filtered_points", qos);
+        pub_floor = this->create_publisher<sensor_msgs::msg::PointCloud2>("/lidar_filter/floor_points", qos);
+
+ // // Set up QoS settings for the subscriber
+ // rclcpp::QoS qos(rclcpp::KeepLast(5)); // Keep the last 10 messages
+ // qos.reliability(RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT); // Best-effort reliability
+
+ // Create subscribers lidar points and NAV_status (to destroy node)
+        sub_ = this->create_subscription<sensor_msgs::msg::PointCloud2>("/ouster/points", qos, std::bind(&LidarFilterNode::callback, this, std::placeholders::_1));
+        destroy_sub_ = this->create_subscription<std_msgs::msg::String>("ROVER/NAV_status", qos, std::bind(&LidarFilterNode::destroy_callback, this, std::placeholders::_1));
+ }
+
+ void callback(const sensor_msgs::msg::PointCloud2::SharedPtr msg)
+ {
+
+ filter_bool = this->get_parameter("filter").as_bool();
+
+
+ if (!filter_bool) {
+ RCLCPP_INFO(this->get_logger(), "Not filtering lidar.");
+ pub_floor->publish(*msg);
+ return;
+ }
+
+ RCLCPP_INFO(this->get_logger(), "Filtering lidar.");
+
+ min_dist = this->get_parameter("min_distance").as_double();
+ slope_threshold = this->get_parameter("slope_threshold").as_double();
+
+ // Convert to pcl point cloud
+        pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
+        pcl::fromROSMsg(*msg, *cloud); // source on the left, target on the right
+
+        pcl::PointCloud<pcl::PointXYZ>::Ptr filtered_points(new pcl::PointCloud<pcl::PointXYZ>());
+        pcl::PointCloud<pcl::PointXYZ>::Ptr obstacle_points(new pcl::PointCloud<pcl::PointXYZ>());
+        pcl::PointCloud<pcl::PointXYZ>::Ptr floor_points(new pcl::PointCloud<pcl::PointXYZ>());
+
+ // voxel grid filter to downsample
+        pcl::VoxelGrid<pcl::PointXYZ> filter;
+ filter.setInputCloud(cloud);
+ filter.setLeafSize(0.05f, 0.05f, 0.05f);
+ filter.filter(*filtered_points); // output dataset is the filtered_points
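+        // With a 5 cm leaf size, every occupied 5x5x5 cm voxel is replaced by the
+        // centroid of its points, thinning the cloud before normal estimation.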
+
+        // estimate per-point normals from the k nearest neighbours of each point
+        pcl::NormalEstimationOMP<pcl::PointXYZ, pcl::Normal> normal_estimator;
+        normal_estimator.setInputCloud(filtered_points);
+        normal_estimator.setKSearch(50); // number of neighbours used for each normal estimate
+        normal_estimator.useSensorOriginAsViewPoint();
+
+        pcl::search::KdTree<pcl::PointXYZ>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZ>());
+        normal_estimator.setSearchMethod(tree);
+
+        pcl::PointCloud<pcl::Normal>::Ptr points_normal(new pcl::PointCloud<pcl::Normal>());
+        normal_estimator.compute(*points_normal);
+
+ // iterate through the points and normals
+ auto normal_iterator = points_normal->begin();
+
+ for(auto point : *filtered_points) {
+ pcl::Normal normal = *normal_iterator++;
+
+            float normal_h = std::sqrt(normal.normal_x*normal.normal_x + normal.normal_y*normal.normal_y); // horizontal component of the normal
+            float slope_percentage = std::fabs(normal_h / normal.normal_z);
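+            // slope_percentage == 1.0 corresponds to a surface tilted 45 degrees from
+            // horizontal; points above slope_threshold are classified as obstacles below.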
+
+ if(point.x*point.x + point.y*point.y > min_dist*min_dist) {
+ if(slope_percentage > slope_threshold) {
+ obstacle_points->push_back(point);
+ } else {
+ floor_points->push_back(point);
+ }
+ }
+ }
+
+ // Filter
+ // pcl::PointCloud::Ptr cloud_filtered(new pcl::PointCloud);
+ // pcl::PassThrough pass;
+ // pass.setInputCloud(cloud);
+ // pass.setFilterFieldName("z");
+ // pass.setFilterLimits(0.0, 1.0);
+ // pass.filter(*cloud_filtered);
+
+ sensor_msgs::msg::PointCloud2::SharedPtr output(new sensor_msgs::msg::PointCloud2);
+
+ // Obstacles
+ pcl::toROSMsg(*obstacle_points, *output);
+ output->header.frame_id = msg->header.frame_id;
+ pub_->publish(*output);
+
+ // Floor
+ pcl::toROSMsg(*floor_points, *output);
+ output->header.frame_id = msg->header.frame_id;
+ pub_floor->publish(*output);
+
+ // Publish
+ // sensor_msgs::msg::PointCloud2::SharedPtr output(new sensor_msgs::msg::PointCloud2);
+ // pcl::toROSMsg(*cloud_filtered, *output);
+ // output->header.frame_id = "base_link";
+ // pub_->publish(*output);
+ }
+
+ void destroy_callback(const std_msgs::msg::String::SharedPtr msg)
+ {
+ if(msg->data == "abort") rclcpp::shutdown();
+ }
+
+};
+
+int main(int argc, char** argv) {
+ rclcpp::init(argc, argv);
+    rclcpp::spin(std::make_shared<LidarFilterNode>());
+ rclcpp::shutdown();
+ return 0;
+}