From 1a274edf44c169d9a9effdc745743d5e84bd9957 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Tue, 26 Sep 2023 17:44:48 -0400 Subject: [PATCH 001/126] add starter files for second cam tag detection node --- .../tag_detector/long_range_tag_detector.cpp | 156 ++++++++++++++++++ .../tag_detector/long_range_tag_detector.hpp | 45 +++++ .../long_range_tag_detector.processing.cpp | 21 +++ 3 files changed, 222 insertions(+) create mode 100644 src/perception/tag_detector/long_range_tag_detector.cpp create mode 100644 src/perception/tag_detector/long_range_tag_detector.hpp create mode 100644 src/perception/tag_detector/long_range_tag_detector.processing.cpp diff --git a/src/perception/tag_detector/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_tag_detector.cpp new file mode 100644 index 000000000..86ab376b4 --- /dev/null +++ b/src/perception/tag_detector/long_range_tag_detector.cpp @@ -0,0 +1,156 @@ +#include "tag_detector.hpp" + +namespace mrover { + + void TagDetectorNodelet::onInit() { + mNh = getMTNodeHandle(); + mPnh = getMTPrivateNodeHandle(); + mDetectorParams = new cv::aruco::DetectorParameters(); + auto defaultDetectorParams = cv::aruco::DetectorParameters::create(); + int dictionaryNumber; + + mPnh.param("publish_images", mPublishImages, true); + using DictEnumType = std::underlying_type_t; + mPnh.param("dictionary", dictionaryNumber, static_cast(cv::aruco::DICT_4X4_50)); + mPnh.param("min_hit_count_before_publish", mMinHitCountBeforePublish, 5); + mPnh.param("max_hit_count", mMaxHitCount, 5); + mPnh.param("tag_increment_weight", mTagIncrementWeight, 2); + mPnh.param("tag_decrement_weight", mTagDecrementWeight, 1); + + mIt.emplace(mNh); + mImgPub = mIt->advertise("long_range_tag_detection", 1); + mDictionary = cv::aruco::getPredefinedDictionary(dictionaryNumber); + + mPcSub = mNh.subscribe("zoom_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); + mServiceEnableDetections = mNh.advertiseService("enable_detections", 
&TagDetectorNodelet::enableDetectionsCallback, this); + + // Lambda handles passing class pointer (implicit first parameter) to configCallback + mCallbackType = [this](mrover::DetectorParamsConfig& config, uint32_t level) { configCallback(config, level); }; + mConfigServer.setCallback(mCallbackType); + + mPnh.param("adaptiveThreshConstant", + mDetectorParams->adaptiveThreshConstant, defaultDetectorParams->adaptiveThreshConstant); + mPnh.param("adaptiveThreshWinSizeMax", + mDetectorParams->adaptiveThreshWinSizeMax, defaultDetectorParams->adaptiveThreshWinSizeMax); + mPnh.param("adaptiveThreshWinSizeMin", + mDetectorParams->adaptiveThreshWinSizeMin, defaultDetectorParams->adaptiveThreshWinSizeMin); + mPnh.param("adaptiveThreshWinSizeStep", + mDetectorParams->adaptiveThreshWinSizeStep, defaultDetectorParams->adaptiveThreshWinSizeStep); + mPnh.param("cornerRefinementMaxIterations", + mDetectorParams->cornerRefinementMaxIterations, + defaultDetectorParams->cornerRefinementMaxIterations); + mPnh.param("cornerRefinementMinAccuracy", + mDetectorParams->cornerRefinementMinAccuracy, + defaultDetectorParams->cornerRefinementMinAccuracy); + mPnh.param("cornerRefinementWinSize", + mDetectorParams->cornerRefinementWinSize, defaultDetectorParams->cornerRefinementWinSize); + + bool doCornerRefinement = true; + mPnh.param("doCornerRefinement", doCornerRefinement, true); + if (doCornerRefinement) { + bool cornerRefinementSubPix = true; + mPnh.param("cornerRefinementSubPix", cornerRefinementSubPix, true); + if (cornerRefinementSubPix) { + mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX; + } else { + mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR; + } + } else { + mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE; + } + + mPnh.param("errorCorrectionRate", + mDetectorParams->errorCorrectionRate, defaultDetectorParams->errorCorrectionRate); + mPnh.param("minCornerDistanceRate", + 
mDetectorParams->minCornerDistanceRate, defaultDetectorParams->minCornerDistanceRate); + mPnh.param("markerBorderBits", + mDetectorParams->markerBorderBits, defaultDetectorParams->markerBorderBits); + mPnh.param("maxErroneousBitsInBorderRate", + mDetectorParams->maxErroneousBitsInBorderRate, + defaultDetectorParams->maxErroneousBitsInBorderRate); + mPnh.param("minDistanceToBorder", + mDetectorParams->minDistanceToBorder, defaultDetectorParams->minDistanceToBorder); + mPnh.param("minMarkerDistanceRate", + mDetectorParams->minMarkerDistanceRate, defaultDetectorParams->minMarkerDistanceRate); + mPnh.param("minMarkerPerimeterRate", + mDetectorParams->minMarkerPerimeterRate, defaultDetectorParams->minMarkerPerimeterRate); + mPnh.param("maxMarkerPerimeterRate", + mDetectorParams->maxMarkerPerimeterRate, defaultDetectorParams->maxMarkerPerimeterRate); + mPnh.param("minOtsuStdDev", mDetectorParams->minOtsuStdDev, defaultDetectorParams->minOtsuStdDev); + mPnh.param("perspectiveRemoveIgnoredMarginPerCell", + mDetectorParams->perspectiveRemoveIgnoredMarginPerCell, + defaultDetectorParams->perspectiveRemoveIgnoredMarginPerCell); + mPnh.param("perspectiveRemovePixelPerCell", + mDetectorParams->perspectiveRemovePixelPerCell, + defaultDetectorParams->perspectiveRemovePixelPerCell); + mPnh.param("polygonalApproxAccuracyRate", + mDetectorParams->polygonalApproxAccuracyRate, + defaultDetectorParams->polygonalApproxAccuracyRate); + + NODELET_INFO("Tag detection ready, use odom frame: %s, min hit count: %d, max hit count: %d, hit increment weight: %d, hit decrement weight: %d", mUseOdom ? 
"true" : "false", mMinHitCountBeforePublish, mMaxHitCount, mTagIncrementWeight, mTagDecrementWeight); + } + + void TagDetectorNodelet::configCallback(mrover::DetectorParamsConfig& config, uint32_t level) { + // Don't load initial config, since it will overwrite the rosparam settings + if (level == std::numeric_limits::max()) return; + + mDetectorParams->adaptiveThreshConstant = config.adaptiveThreshConstant; + mDetectorParams->adaptiveThreshWinSizeMin = config.adaptiveThreshWinSizeMin; + mDetectorParams->adaptiveThreshWinSizeMax = config.adaptiveThreshWinSizeMax; + mDetectorParams->adaptiveThreshWinSizeStep = config.adaptiveThreshWinSizeStep; + mDetectorParams->cornerRefinementMaxIterations = config.cornerRefinementMaxIterations; + mDetectorParams->cornerRefinementMinAccuracy = config.cornerRefinementMinAccuracy; + mDetectorParams->cornerRefinementWinSize = config.cornerRefinementWinSize; + if (config.doCornerRefinement) { + if (config.cornerRefinementSubpix) { + mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX; + } else { + mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR; + } + } else { + mDetectorParams->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE; + } + mDetectorParams->errorCorrectionRate = config.errorCorrectionRate; + mDetectorParams->minCornerDistanceRate = config.minCornerDistanceRate; + mDetectorParams->markerBorderBits = config.markerBorderBits; + mDetectorParams->maxErroneousBitsInBorderRate = config.maxErroneousBitsInBorderRate; + mDetectorParams->minDistanceToBorder = config.minDistanceToBorder; + mDetectorParams->minMarkerDistanceRate = config.minMarkerDistanceRate; + mDetectorParams->minMarkerPerimeterRate = config.minMarkerPerimeterRate; + mDetectorParams->maxMarkerPerimeterRate = config.maxMarkerPerimeterRate; + mDetectorParams->minOtsuStdDev = config.minOtsuStdDev; + mDetectorParams->perspectiveRemoveIgnoredMarginPerCell = config.perspectiveRemoveIgnoredMarginPerCell; + 
mDetectorParams->perspectiveRemovePixelPerCell = config.perspectiveRemovePixelPerCell; + mDetectorParams->polygonalApproxAccuracyRate = config.polygonalApproxAccuracyRate; + } + + bool TagDetectorNodelet::enableDetectionsCallback(std_srvs::SetBool::Request& req, std_srvs::SetBool::Response& res) { + mEnableDetections = req.data; + if (mEnableDetections) { + res.message = "Enabled tag detections."; + NODELET_INFO("Enabled tag detections."); + } else { + res.message = "Disabled tag detections."; + NODELET_INFO("Disabled tag detections."); + } + + res.success = true; + return true; + } + +} // namespace mrover + +int main(int argc, char** argv) { + ros::init(argc, argv, "tag_detector"); + + // Start the ZED Nodelet + nodelet::Loader nodelet; + nodelet.load(ros::this_node::getName(), "mrover/TagDetectorNodelet", ros::names::getRemappings(), {}); + + ros::spin(); + + return EXIT_SUCCESS; +} + +#include +PLUGINLIB_EXPORT_CLASS(mrover::TagDetectorNodelet, nodelet::Nodelet) diff --git a/src/perception/tag_detector/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_tag_detector.hpp new file mode 100644 index 000000000..bfeb2706e --- /dev/null +++ b/src/perception/tag_detector/long_range_tag_detector.hpp @@ -0,0 +1,45 @@ +namespace mrover { + + class LongRangeTagDetectorNodelet : public nodelet::Nodelet { + ros::NodeHandle mNh, mPnh; + + std::optional mIt; + image_transport::Publisher mImgPub; + std::unordered_map mThreshPubs; // Map from threshold scale to publisher + ros::ServiceServer mServiceEnableDetections; + + image_transport::Subscriber mImgSub; + + bool mEnableDetections = true; + bool mUseOdom{}; + std::string mOdomFrameId, mMapFrameId, mCameraFrameId; + bool mPublishImages{}; // If set, we publish the images with the tags drawn on top + int mMinHitCountBeforePublish{}; + int mMaxHitCount{}; + int mTagIncrementWeight{}; + int mTagDecrementWeight{}; + + cv::Ptr mDetectorParams; + cv::Ptr mDictionary; + + cv::Mat mImg; + cv::Mat mGrayImg; + 
sensor_msgs::Image mImgMsg; + sensor_msgs::Image mThreshMsg; + uint32_t mSeqNum{}; + std::optional mPrevDetectedCount; // Log spam prevention + std::vector> mImmediateCorners; + std::vector mImmediateIds; + std::unordered_map mTags; + dynamic_reconfigure::Server mConfigServer; + dynamic_reconfigure::Server::CallbackType mCallbackType; + + LoopProfiler mProfiler{"Tag Detector"}; + + void onInit() override; + + void publishThresholdedImage(); + + std::optional getTagInCamFromPixel(sensor_msgs::PointCloud2ConstPtr const& cloudPtr, size_t u, size_t v); + } +} \ No newline at end of file diff --git a/src/perception/tag_detector/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_tag_detector.processing.cpp new file mode 100644 index 000000000..14b60ef2c --- /dev/null +++ b/src/perception/tag_detector/long_range_tag_detector.processing.cpp @@ -0,0 +1,21 @@ +#include "long_range_tag_detector.hpp" + +#include "../point.hpp" + +namespace mrover { + + /** + * Detects tags in an image, draws the detected markers onto the image, and publishes them to /long_range_tag_detection + * + * @param msg image message + */ + void TagDetectorNodelet::imageCallback(sensor_msgs::Image const& msg) { + // 1. Detect tags + // 2. Update the hit counts of the tags in the mTags map + // 3. 
We only want to publish the tags if the topic has subscribers + if (mPublishImages && mImgPub.getNumSubscribers()) { + // Draw the tags on the image using OpenCV + } + } + +} // namespace mrover \ No newline at end of file From ce56c5326ed880cde673c7947e74d320b4c85e55 Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 26 Sep 2023 16:08:58 -0400 Subject: [PATCH 002/126] Wrote detection and hit updating 4 long range tags --- .../tag_detector/long_range_tag_detector.hpp | 68 ++++++++++++++- .../long_range_tag_detector.processing.cpp | 85 ++++++++++++++++++- 2 files changed, 148 insertions(+), 5 deletions(-) diff --git a/src/perception/tag_detector/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_tag_detector.hpp index bfeb2706e..643f11801 100644 --- a/src/perception/tag_detector/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_tag_detector.hpp @@ -1,5 +1,13 @@ +#include namespace mrover { + typedef struct LongRangeTagType { + bool updated = false; + int id = -1; + int hitCount = 0; + cv::Point2f imageCenter{}; + } LongRangeTag; + class LongRangeTagDetectorNodelet : public nodelet::Nodelet { ros::NodeHandle mNh, mPnh; @@ -16,21 +24,33 @@ namespace mrover { bool mPublishImages{}; // If set, we publish the images with the tags drawn on top int mMinHitCountBeforePublish{}; int mMaxHitCount{}; + int mBaseHitCount{}; //Value the tag is initialized with after first detection. 
(Not incremented) int mTagIncrementWeight{}; int mTagDecrementWeight{}; + int mTagRemoveWeight{}; //weight value before the tag is removed + + //ARUCO TAG DETECTION VARIABLES cv::Ptr mDetectorParams; cv::Ptr mDictionary; + //IMAGE MESSAGE VARIABLES + cv::Mat mImg; cv::Mat mGrayImg; sensor_msgs::Image mImgMsg; sensor_msgs::Image mThreshMsg; + + uint32_t mSeqNum{}; std::optional mPrevDetectedCount; // Log spam prevention + + //Raw Tag Data from CV::ARUCO std::vector> mImmediateCorners; std::vector mImmediateIds; - std::unordered_map mTags; + + //Map from tagID to Tag + std::unordered_map mTags; dynamic_reconfigure::Server mConfigServer; dynamic_reconfigure::Server::CallbackType mCallbackType; @@ -40,6 +60,46 @@ namespace mrover { void publishThresholdedImage(); - std::optional getTagInCamFromPixel(sensor_msgs::PointCloud2ConstPtr const& cloudPtr, size_t u, size_t v); - } -} \ No newline at end of file + std::optional getTagInCamFromPixel(sensor_msgs::PointCloud2ConstPtr const& cloudPtr, size_t u, size_t v); + + + /** + * Detects tags in an image, draws the detected markers onto the image, and publishes them to /long_range_tag_detection + * 1. 
Updates mImg to store the underlying image matrix + * @param msg image message + */ + void imageCallback(sensor_msgs::Image const& msg); + + /** + * Stores msg to mImgMsg + * Creates image matrix from message, stores to mImg + * + * @param msg Current Image Message + */ + void updateImageMatrices(sensor_msgs::Image const& msg); + + /** + * Runs the cv::aruco tag detection on the cv::mat + * Modifies mImmedateCorners and mImmediateIds + * Uses the mDetectorParams and mDictionary member variables + */ + void runTagDetection(); + + /** + * For all the tags stored in mImmediateIds and mImmediateCorners: + * Check to see if they already exist in mTags + * If they do, increment their + */ + void updateHitCounts(); + + LongRangeTag createLrt(int tagId, std::vector& tagCorners); + + /** + * Assumes that mImg exists + * @return a cv::Point2f that contains the centerx and centery + * centerx and centery are -0.5, 0.5, and reflect a normalized offset from the image center + * Average all x values + */ + cv::Point2f getImageCenter(std::vector& tagCorners); + }; +}; // namespace mrover \ No newline at end of file diff --git a/src/perception/tag_detector/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_tag_detector.processing.cpp index 14b60ef2c..cf1b5d232 100644 --- a/src/perception/tag_detector/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_tag_detector.processing.cpp @@ -1,6 +1,7 @@ #include "long_range_tag_detector.hpp" #include "../point.hpp" +#include namespace mrover { @@ -9,13 +10,95 @@ namespace mrover { * * @param msg image message */ - void TagDetectorNodelet::imageCallback(sensor_msgs::Image const& msg) { + void LongRangeTagDetectorNodelet::imageCallback(sensor_msgs::Image const& msg) { + if (!mEnableDetections) return; + + //Store image contents to member variables + updateImageMatrices(msg); + // 1. Detect tags + runTagDetection(); + // 2. 
Update the hit counts of the tags in the mTags map + updateHitCounts(); + // 3. We only want to publish the tags if the topic has subscribers if (mPublishImages && mImgPub.getNumSubscribers()) { // Draw the tags on the image using OpenCV + // LongRangeTagDetectorNodelet::publishDrawnImages() } + + //PUBLISH TAGS + } + + //HELPER FUNCTIONS + + void LongRangeTagDetectorNodelet::updateImageMatrices(sensor_msgs::Image const& msg) { + //Store image message to mImgMsg member variable + mImgMsg = msg; + + cv::Mat cvImage{static_cast(msg.height), static_cast(msg.width), CV_8UC3, const_cast(msg.data.data())}; + + //Store to mImg member variable - unsure if necessary but need it to persist + cvImage.copyTo(mImg); + + // TODO: Set the grayImage if neccesary + } + + void LongRangeTagDetectorNodelet::runTagDetection() { + cv::aruco::detectMarkers(mImg, mDictionary, mImmediateCorners, mImmediateIds, mDetectorParams); + } + + void LongRangeTagDetectorNodelet::updateHitCounts() { + //loop through all identified IDs + for (size_t i = 0; i < mImmediateIds.size(); i++) { + int currentId = mImmediateIds[i]; + + //Create new struct for each tag + LongRangeTag lrt = createLrt(currentId, mImmediateCorners[i]); + + //Check if the tag was already detected and update hitCount to reflect + if (mTags.contains(currentId)) { + //Key exist sin mTags + lrt.hitCount = std::min(mTags[currentId].hitCount + mTagIncrementWeight, mMaxHitCount); + } + + mTags[currentId] = lrt; + } + + //Now decrement all the hitcounts for tags that were not updated + // Set updated status to false + + for (auto& mTag: mTags) { + LongRangeTag& curtag = mTag.second; + + if (curtag.updated) { + curtag.updated = false; + } else { + //Decrement weight of undetected tags + curtag.hitCount -= mTagDecrementWeight; + + //if the value has fallen belown the minimum, remove it + if (curtag.hitCount <= mTagRemoveWeight) { + mTags.erase(mTag.first); + } + } + } + + + //decrement non updated & set updated status to false + } + + 
LongRangeTag LongRangeTagDetectorNodelet::createLrt(int tagId, std::vector& tagCorners) { + LongRangeTag lrt; + + lrt.hitCount = mBaseHitCount; //Start at base hit count value + lrt.id = tagId; + lrt.updated = true; + + lrt.imageCenter = getImageCenter(tagCorners); + + return lrt; } } // namespace mrover \ No newline at end of file From bba2188d571e13e6ec18c9f338ecc75e907dd218 Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 26 Sep 2023 17:19:12 -0400 Subject: [PATCH 003/126] added normed center offset functionality --- .../tag_detector/long_range_tag_detector.hpp | 29 +++++++- .../long_range_tag_detector.processing.cpp | 73 +++++++++++++++---- 2 files changed, 87 insertions(+), 15 deletions(-) diff --git a/src/perception/tag_detector/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_tag_detector.hpp index 643f11801..42c3a8aee 100644 --- a/src/perception/tag_detector/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_tag_detector.hpp @@ -92,14 +92,41 @@ namespace mrover { */ void updateHitCounts(); + /** + * @see updateNewlyIdentifiedTags() + * @param tagId - the tagId of the current tag + * @param tagCorners - Reference to the mTagCorners vector of Point2fs for the current id of the lrt being created + * @return a new LongRangeTag + */ LongRangeTag createLrt(int tagId, std::vector& tagCorners); /** + * @see getNormedTagCenter * Assumes that mImg exists * @return a cv::Point2f that contains the centerx and centery * centerx and centery are -0.5, 0.5, and reflect a normalized offset from the image center * Average all x values */ - cv::Point2f getImageCenter(std::vector& tagCorners); + cv::Point2f static getTagCenterPixels(std::vector& tagCorners); + + /** + * Assumes upper left corner is 0, 0 + * Want to return a negative pixel offset if width less than image_width / 2 + * similarily, negative if height > image_height / 2 + * @param tagCorners reference to tag corners, passed to @see getTagCenterPixesl + * 
@return Point2f of pixel offset from center + */ + cv::Point2f getTagCenterOffsetPixels(std::vector& tagCorners) const; + + cv::Point2f getNormedTagCenterOffset(std::vector& tagCorners) const; + + /** + * @see updateHitCounts() + * Helper function used in updateHitCounts + * Creates a new LongRangeTag for the identified tags and handles + * logic of adding it to the map + * @param tagIndex the index i of the target tag in the mImmediate vectors + */ + void updateNewlyIdentifiedTags(size_t tagIndex); }; }; // namespace mrover \ No newline at end of file diff --git a/src/perception/tag_detector/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_tag_detector.processing.cpp index cf1b5d232..ae57007f7 100644 --- a/src/perception/tag_detector/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_tag_detector.processing.cpp @@ -1,6 +1,7 @@ #include "long_range_tag_detector.hpp" #include "../point.hpp" +#include #include namespace mrover { @@ -49,22 +50,12 @@ namespace mrover { cv::aruco::detectMarkers(mImg, mDictionary, mImmediateCorners, mImmediateIds, mDetectorParams); } + void LongRangeTagDetectorNodelet::updateHitCounts() { //loop through all identified IDs - for (size_t i = 0; i < mImmediateIds.size(); i++) { - int currentId = mImmediateIds[i]; - - //Create new struct for each tag - LongRangeTag lrt = createLrt(currentId, mImmediateCorners[i]); + for (size_t i = 0; i < mImmediateIds.size(); i++) + updateNewlyIdentifiedTags(i); - //Check if the tag was already detected and update hitCount to reflect - if (mTags.contains(currentId)) { - //Key exist sin mTags - lrt.hitCount = std::min(mTags[currentId].hitCount + mTagIncrementWeight, mMaxHitCount); - } - - mTags[currentId] = lrt; - } //Now decrement all the hitcounts for tags that were not updated // Set updated status to false @@ -96,9 +87,63 @@ namespace mrover { lrt.id = tagId; lrt.updated = true; - lrt.imageCenter = getImageCenter(tagCorners); + 
lrt.imageCenter = getNormedTagCenterOffset(tagCorners); return lrt; } + void LongRangeTagDetectorNodelet::updateNewlyIdentifiedTags(size_t tagIndex) { + int currentId = mImmediateIds[tagIndex]; + + //Create new struct for each tag + LongRangeTag lrt = createLrt(currentId, mImmediateCorners[tagIndex]); + + //Check if the tag was already detected and update hitCount to reflect + if (mTags.contains(currentId)) { + //Key exist sin mTags + lrt.hitCount = std::min(mTags[currentId].hitCount + mTagIncrementWeight, mMaxHitCount); + } + + mTags[currentId] = lrt; + } + + cv::Point2f LongRangeTagDetectorNodelet::getTagCenterPixels(std::vector& tagCorners) { + cv::Point2f centerPoint; + float centerX = 0; + float centerY = 0; + + for (size_t i = 0; i < 4; i++) { + centerX += tagCorners[i].x; + centerY += tagCorners[i].y; + } + + centerX /= 4.0; + centerY /= 4.0; + + centerPoint.x = centerX; + centerPoint.y = centerY; + + return centerPoint; + } + + cv::Point2f LongRangeTagDetectorNodelet::getTagCenterOffsetPixels(std::vector& tagCorners) const { + cv::Point2f centerPoint = getTagCenterPixels(tagCorners); + + centerPoint.x -= float(mImgMsg.width); + + //-1 is necessary because of 0,0 being in the top left + centerPoint.y = float(-1.0) * (centerPoint.y - float(mImgMsg.height)); + + return centerPoint; + } + + cv::Point2f LongRangeTagDetectorNodelet::getNormedTagCenterOffset(std::vector& tagCorners) const { + cv::Point2f offsetCenterPoint = getTagCenterOffsetPixels(tagCorners); + + offsetCenterPoint.x /= float(mImgMsg.width); + offsetCenterPoint.y /= float(mImgMsg.height); + + return offsetCenterPoint; + } + } // namespace mrover \ No newline at end of file From 1ce57d1aedf5267f11995eab0211ff49df948efa Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 26 Sep 2023 20:07:37 -0400 Subject: [PATCH 004/126] updated Perception README.md --- src/perception/tag_detector/README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/perception/tag_detector/README.md 
b/src/perception/tag_detector/README.md index f8a24788b..a80de7d8f 100644 --- a/src/perception/tag_detector/README.md +++ b/src/perception/tag_detector/README.md @@ -2,5 +2,12 @@ ### Code Layout + +## Short Range Detection - [tag_detector.cpp](./tag_detector.cpp) Mainly ROS node setup (topics, parameters, etc.) - [tag_detector.processing.cpp](./tag_detector.processing.cpp) Processes inputs (camera feed and pointcloud) to estimate locations of the ArUco fiducials + + +## Long Range Detection +- [long_range_tag_detector.cpp](./long_range_tag_detector.cpp) Mainly ROS node setup (topics, parameters, etc.) +- [long_range_tag_detector.processing.cpp](./long_range_tag_detector.processing.cpp) Does all the input processing (ArUco recognition) and publishes the information. From f14d6fbc6e6f4e66f1ad660426de8da2a5b4c29a Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 26 Sep 2023 20:13:28 -0400 Subject: [PATCH 005/126] Created message types --- msg/LongRangeTag.msg | 7 +++++++ msg/LongRangeTags.msg | 1 + 2 files changed, 8 insertions(+) create mode 100644 msg/LongRangeTag.msg create mode 100644 msg/LongRangeTags.msg diff --git a/msg/LongRangeTag.msg b/msg/LongRangeTag.msg new file mode 100644 index 000000000..6a8ea2cb2 --- /dev/null +++ b/msg/LongRangeTag.msg @@ -0,0 +1,7 @@ +# Message published by the LongRangeTagDetector nodelet. 
+# -0.5 <= xOffset, yOffset <= 0.5 +# Normed to image size, and have signs reflecting typical cartesian coordinates +# Where (0, 0) is at the image center +uint8 id +float32 xOffset +float32 yOffset \ No newline at end of file diff --git a/msg/LongRangeTags.msg b/msg/LongRangeTags.msg new file mode 100644 index 000000000..2f5c82dff --- /dev/null +++ b/msg/LongRangeTags.msg @@ -0,0 +1 @@ +LongRangeTags[] longRangeTags \ No newline at end of file From 1ee5d9fed4e1dd5ac00c5d7617b82469870b7814 Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 26 Sep 2023 20:36:52 -0400 Subject: [PATCH 006/126] fixed tags issue and added to CmakeLists --- CMakeLists.txt | 2 ++ msg/LongRangeTags.msg | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3943d9f44..1ba21c323 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -49,6 +49,8 @@ set(MROVER_MESSAGE_FILES Waypoint.msg WaypointType.msg GPSPointList.msg + LongRangeTag.msg + LongRangeTags.msg ) # Service files diff --git a/msg/LongRangeTags.msg b/msg/LongRangeTags.msg index 2f5c82dff..4a48ffb9b 100644 --- a/msg/LongRangeTags.msg +++ b/msg/LongRangeTags.msg @@ -1 +1 @@ -LongRangeTags[] longRangeTags \ No newline at end of file +LongRangeTag[] longRangeTags \ No newline at end of file From de3fd1ebf1e324d10f352e5d6b0d4c372a74ee53 Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 26 Sep 2023 20:43:35 -0400 Subject: [PATCH 007/126] added tag publishing logic --- .../tag_detector/long_range_tag_detector.hpp | 68 ++++++++++++------- .../long_range_tag_detector.processing.cpp | 40 +++++++++-- 2 files changed, 77 insertions(+), 31 deletions(-) diff --git a/src/perception/tag_detector/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_tag_detector.hpp index 42c3a8aee..c600e25a6 100644 --- a/src/perception/tag_detector/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_tag_detector.hpp @@ -1,4 +1,9 @@ +#include 
"mrover/LongRangeTag.h" +#include "mrover/LongRangeTags.h" + #include +#include + namespace mrover { typedef struct LongRangeTagType { @@ -6,22 +11,23 @@ namespace mrover { int id = -1; int hitCount = 0; cv::Point2f imageCenter{}; - } LongRangeTag; + } LongRangeTagStruct; class LongRangeTagDetectorNodelet : public nodelet::Nodelet { ros::NodeHandle mNh, mPnh; - std::optional mIt; + //Image Subscriber + image_transport::Subscriber mImgSub; image_transport::Publisher mImgPub; - std::unordered_map mThreshPubs; // Map from threshold scale to publisher - ros::ServiceServer mServiceEnableDetections; - image_transport::Subscriber mImgSub; + //Publishes LongRangeTags messages + ros::Publisher mLongRangeTagsPub; + //Publishing Flags bool mEnableDetections = true; - bool mUseOdom{}; - std::string mOdomFrameId, mMapFrameId, mCameraFrameId; bool mPublishImages{}; // If set, we publish the images with the tags drawn on top + + //Constatns set in ROS params for num hits needed to publish int mMinHitCountBeforePublish{}; int mMaxHitCount{}; int mBaseHitCount{}; //Value the tag is initialized with after first detection. 
(Not incremented) @@ -34,35 +40,34 @@ namespace mrover { cv::Ptr mDetectorParams; cv::Ptr mDictionary; - //IMAGE MESSAGE VARIABLES - + //IMAGE VARIABLES cv::Mat mImg; - cv::Mat mGrayImg; sensor_msgs::Image mImgMsg; - sensor_msgs::Image mThreshMsg; - - - uint32_t mSeqNum{}; - std::optional mPrevDetectedCount; // Log spam prevention //Raw Tag Data from CV::ARUCO std::vector> mImmediateCorners; std::vector mImmediateIds; //Map from tagID to Tag - std::unordered_map mTags; + std::unordered_map mTags; + + //TODO + //All these variables are unused by long_range_tag_detector.processing.cpp + //A bunch of things I think I should be able to delete + //But I am not sure about + uint32_t mSeqNum{}; + std::optional mPrevDetectedCount; // Log spam prevention dynamic_reconfigure::Server mConfigServer; dynamic_reconfigure::Server::CallbackType mCallbackType; - - LoopProfiler mProfiler{"Tag Detector"}; + LoopProfiler mProfiler{"Long RangeTag Detector"}; + std::optional mIt; + std::unordered_map mThreshPubs; // Map from threshold scale to publisher + ros::ServiceServer mServiceEnableDetections; + bool mUseOdom{}; + std::string mOdomFrameId, mMapFrameId, mCameraFrameId; void onInit() override; - void publishThresholdedImage(); - - std::optional getTagInCamFromPixel(sensor_msgs::PointCloud2ConstPtr const& cloudPtr, size_t u, size_t v); - - /** * Detects tags in an image, draws the detected markers onto the image, and publishes them to /long_range_tag_detection * 1. 
Updates mImg to store the underlying image matrix @@ -98,7 +103,7 @@ namespace mrover { * @param tagCorners - Reference to the mTagCorners vector of Point2fs for the current id of the lrt being created * @return a new LongRangeTag */ - LongRangeTag createLrt(int tagId, std::vector& tagCorners); + LongRangeTagStruct createLrt(int tagId, std::vector& tagCorners); /** * @see getNormedTagCenter @@ -128,5 +133,18 @@ namespace mrover { * @param tagIndex the index i of the target tag in the mImmediate vectors */ void updateNewlyIdentifiedTags(size_t tagIndex); + + + /** + * Publish the tags which have been detected for more than + * mMinHitCountBeforePublish + */ + void publishThresholdTags(); + + /** + * publishes the thresholded tags onto an image using OpenCV + * only if mPublishImages and the topic has a subscriber + */ + void publishTagsOnImage(); }; -}; // namespace mrover \ No newline at end of file +}; // namespace mrover diff --git a/src/perception/tag_detector/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_tag_detector.processing.cpp index ae57007f7..993ecd13d 100644 --- a/src/perception/tag_detector/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_tag_detector.processing.cpp @@ -1,6 +1,8 @@ #include "long_range_tag_detector.hpp" -#include "../point.hpp" +#include "../point.hpp" //Might not actually need?' +#include "mrover/LongRangeTags.h" + #include #include @@ -26,9 +28,11 @@ namespace mrover { // 3. 
We only want to publish the tags if the topic has subscribers if (mPublishImages && mImgPub.getNumSubscribers()) { // Draw the tags on the image using OpenCV - // LongRangeTagDetectorNodelet::publishDrawnImages() + publishTagsOnImage(); } + publishThresholdTags(); + //PUBLISH TAGS } @@ -61,7 +65,7 @@ namespace mrover { // Set updated status to false for (auto& mTag: mTags) { - LongRangeTag& curtag = mTag.second; + LongRangeTagStruct& curtag = mTag.second; if (curtag.updated) { curtag.updated = false; @@ -80,8 +84,8 @@ namespace mrover { //decrement non updated & set updated status to false } - LongRangeTag LongRangeTagDetectorNodelet::createLrt(int tagId, std::vector& tagCorners) { - LongRangeTag lrt; + LongRangeTagStruct LongRangeTagDetectorNodelet::createLrt(int tagId, std::vector& tagCorners) { + LongRangeTagStruct lrt; lrt.hitCount = mBaseHitCount; //Start at base hit count value lrt.id = tagId; @@ -96,7 +100,7 @@ namespace mrover { int currentId = mImmediateIds[tagIndex]; //Create new struct for each tag - LongRangeTag lrt = createLrt(currentId, mImmediateCorners[tagIndex]); + LongRangeTagStruct lrt = createLrt(currentId, mImmediateCorners[tagIndex]); //Check if the tag was already detected and update hitCount to reflect if (mTags.contains(currentId)) { @@ -146,4 +150,28 @@ namespace mrover { return offsetCenterPoint; } + void LongRangeTagDetectorNodelet::publishThresholdTags() { + //Loop through all the tags + LongRangeTags tagsMessage; // + + for (auto& tag: mTags) { + if (tag.second.hitCount >= mMinHitCountBeforePublish) { + //LongRangeTag message + LongRangeTag newTagMessage; + + //Fill in fields + newTagMessage.id = tag.second.id; + newTagMessage.xOffset = tag.second.imageCenter.x; + newTagMessage.yOffset = tag.second.imageCenter.y; + + //Add to back of tagsMessage + tagsMessage.longRangeTags.push_back(newTagMessage); + } + } + + //tagsMessage should be a vector of LongRangeTag messages + //Need something like an mTagsPublisher + 
mLongRangeTagsPub.publish(tagsMessage); + } + } // namespace mrover \ No newline at end of file From 1dc1f1d0dc05e8b309a6d67f01bb0e3bef8b42d4 Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 26 Sep 2023 20:44:49 -0400 Subject: [PATCH 008/126] replaced C style casting with static casts --- .../tag_detector/long_range_tag_detector.processing.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/perception/tag_detector/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_tag_detector.processing.cpp index 993ecd13d..76b8fe7bc 100644 --- a/src/perception/tag_detector/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_tag_detector.processing.cpp @@ -133,10 +133,10 @@ namespace mrover { cv::Point2f LongRangeTagDetectorNodelet::getTagCenterOffsetPixels(std::vector& tagCorners) const { cv::Point2f centerPoint = getTagCenterPixels(tagCorners); - centerPoint.x -= float(mImgMsg.width); + centerPoint.x -= static_cast(mImgMsg.width); //-1 is necessary because of 0,0 being in the top left - centerPoint.y = float(-1.0) * (centerPoint.y - float(mImgMsg.height)); + centerPoint.y = static_cast(-1.0) * (centerPoint.y - static_cast(mImgMsg.height)); return centerPoint; } @@ -144,8 +144,8 @@ namespace mrover { cv::Point2f LongRangeTagDetectorNodelet::getNormedTagCenterOffset(std::vector& tagCorners) const { cv::Point2f offsetCenterPoint = getTagCenterOffsetPixels(tagCorners); - offsetCenterPoint.x /= float(mImgMsg.width); - offsetCenterPoint.y /= float(mImgMsg.height); + offsetCenterPoint.x /= static_cast(mImgMsg.width); + offsetCenterPoint.y /= static_cast(mImgMsg.height); return offsetCenterPoint; } From 977e2526eccb1e10e3d641fa6829da304954dd37 Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 26 Sep 2023 20:48:43 -0400 Subject: [PATCH 009/126] added marked image publishing --- .../long_range_tag_detector.processing.cpp | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) 
diff --git a/src/perception/tag_detector/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_tag_detector.processing.cpp index 76b8fe7bc..1120f5bc0 100644 --- a/src/perception/tag_detector/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_tag_detector.processing.cpp @@ -3,8 +3,12 @@ #include "../point.hpp" //Might not actually need?' #include "mrover/LongRangeTags.h" +#include +#include #include +#include #include +#include namespace mrover { @@ -31,6 +35,7 @@ namespace mrover { publishTagsOnImage(); } + //Publish all tags that meet threshold publishThresholdTags(); //PUBLISH TAGS @@ -174,4 +179,16 @@ namespace mrover { mLongRangeTagsPub.publish(tagsMessage); } + void LongRangeTagDetectorNodelet::publishTagsOnImage() { + cv::Mat markedImage; + mImg.copyTo(markedImage); + + cv::aruco::drawDetectedMarkers(markedImage, mImmediateCorners); + + sensor_msgs::Image imgMsgOut = mImgMsg; + imgMsgOut.data = markedImage; + + mImgPub.publish(imgMsgOut); + } + } // namespace mrover \ No newline at end of file From 35c9bf2571542e9e93b8ca46c0536ba2775054e8 Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Sat, 30 Sep 2023 11:01:03 -0400 Subject: [PATCH 010/126] Removed abbreviation Removed typedef --- .../tag_detector/long_range_tag_detector.hpp | 4 ++-- .../long_range_tag_detector.processing.cpp | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/perception/tag_detector/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_tag_detector.hpp index c600e25a6..b5dddfa06 100644 --- a/src/perception/tag_detector/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_tag_detector.hpp @@ -6,12 +6,12 @@ namespace mrover { - typedef struct LongRangeTagType { + struct LongRangeTagStruct { bool updated = false; int id = -1; int hitCount = 0; cv::Point2f imageCenter{}; - } LongRangeTagStruct; + }; class LongRangeTagDetectorNodelet : public nodelet::Nodelet { 
ros::NodeHandle mNh, mPnh; diff --git a/src/perception/tag_detector/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_tag_detector.processing.cpp index 1120f5bc0..63975f713 100644 --- a/src/perception/tag_detector/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_tag_detector.processing.cpp @@ -70,16 +70,16 @@ namespace mrover { // Set updated status to false for (auto& mTag: mTags) { - LongRangeTagStruct& curtag = mTag.second; + LongRangeTagStruct& currentTag = mTag.second; - if (curtag.updated) { - curtag.updated = false; + if (currentTag.updated) { + currentTag.updated = false; } else { //Decrement weight of undetected tags - curtag.hitCount -= mTagDecrementWeight; + currentTag.hitCount -= mTagDecrementWeight; //if the value has fallen belown the minimum, remove it - if (curtag.hitCount <= mTagRemoveWeight) { + if (currentTag.hitCount <= mTagRemoveWeight) { mTags.erase(mTag.first); } } From 51c664baf2dc4d0a377e66ad1235c95fd9a1fe73 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Sun, 1 Oct 2023 12:33:10 -0400 Subject: [PATCH 011/126] add wrapper node --- .../long_range_cam/long_range_cam.cpp | 167 ++++++++++++++++++ .../long_range_cam/long_range_cam.hpp | 60 +++++++ .../tag_detector/long_range_tag_detector.cpp | 14 +- .../tag_detector/long_range_tag_detector.hpp | 100 +---------- 4 files changed, 242 insertions(+), 99 deletions(-) create mode 100644 src/perception/long_range_cam/long_range_cam.cpp create mode 100644 src/perception/long_range_cam/long_range_cam.hpp diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp new file mode 100644 index 000000000..1a13c5cb1 --- /dev/null +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -0,0 +1,167 @@ +#include "long_range_cam.hpp" + +namespace mrover { + + using namespace std::chrono_literals; + + /** + * @brief Load config, open the camera, and start our threads + */ + void 
LongRangeCamNodelet::onInit() { + try { + // See: http://wiki.ros.org/roscpp/Overview/NodeHandles for public vs. private node handle + // MT means multithreaded + mNh = getMTNodeHandle(); + mPnh = getMTPrivateNodeHandle(); + mCamInfoPub = mNh.advertise("camera/right/camera_info", 1); + image_transport::ImageTransport it{mNh}; + mRightImgPub = it.advertise("long_range_cam/image", 1); + + int imageWidth{}; + int imageHeight{}; + // TODO: edit these so they correspond to the long range cam's resolution + mPnh.param("image_width", imageWidth, 1280); + mPnh.param("image_height", imageHeight, 720); + + if (imageWidth < 0 || imageHeight < 0) { + throw std::invalid_argument("Invalid image dimensions"); + } + if (mGrabTargetFps < 0) { + throw std::invalid_argument("Invalid grab target framerate"); + } + + mImageResolution = sl::Resolution(imageWidth, imageHeight); + mPointResolution = sl::Resolution(imageWidth, imageHeight); + + NODELET_INFO("Resolution: %s image: %zux%zu points: %zux%zu", + grabResolutionString.c_str(), mImageResolution.width, mImageResolution.height); + + sl::InitParameters initParameters; + if (mSvoPath) { + initParameters.input.setFromSVOFile(mSvoPath); + } else { + initParameters.input.setFromCameraID(-1, sl::BUS_TYPE::USB); + } + initParameters.depth_stabilization = mUseDepthStabilization; + initParameters.camera_resolution = stringToZedEnum(grabResolutionString); + initParameters.depth_mode = stringToZedEnum(depthModeString); + initParameters.coordinate_units = sl::UNIT::METER; + initParameters.sdk_verbose = true; // Log useful information + initParameters.camera_fps = mGrabTargetFps; + initParameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Z_UP_X_FWD; // Match ROS + initParameters.depth_maximum_distance = mDepthMaximumDistance; + + if (mZed.open(initParameters) != sl::ERROR_CODE::SUCCESS) { + throw std::runtime_error("ZED failed to open"); + } + + cudaDeviceProp prop{}; + cudaGetDeviceProperties(&prop, 0); + ROS_INFO("MP count: %d, 
Max threads/MP: %d, Max blocks/MP: %d, max threads/block: %d", + prop.multiProcessorCount, prop.maxThreadsPerMultiProcessor, prop.maxBlocksPerMultiProcessor, prop.maxThreadsPerBlock); + + mGrabThread = std::thread(&ZedNodelet::grabUpdate, this); + + } catch (std::exception const& e) { + NODELET_FATAL("Exception while starting: %s", e.what()); + ros::shutdown(); + } + } + + /** + * @brief Grabs measures from the ZED. + * + * This update loop needs to happen as fast as possible. + * grab() on the ZED updates positional tracking (visual odometry) which works best at high update rates. + * As such we retrieve the image and point cloud on the GPU to send to the other thread for processing. + * This only happens if the other thread is ready to avoid blocking, hence the try lock. + */ + void ZedNodelet::grabUpdate() { + try { + NODELET_INFO("Starting grab thread"); + while (ros::ok()) { + mGrabThreadProfiler.beginLoop(); + + sl::RuntimeParameters runtimeParameters; + runtimeParameters.confidence_threshold = mDepthConfidence; + runtimeParameters.texture_confidence_threshold = mTextureConfidence; + + if (mZed.grab(runtimeParameters) != sl::ERROR_CODE::SUCCESS) + throw std::runtime_error("ZED failed to grab"); + mGrabThreadProfiler.measureEvent("Grab"); + + // Retrieval has to happen on the same thread as grab so that the image and point cloud are synced + if (mRightImgPub.getNumSubscribers()) + if (mZed.retrieveImage(mGrabMeasures.rightImage, sl::VIEW::RIGHT, sl::MEM::GPU, mImageResolution) != sl::ERROR_CODE::SUCCESS) + throw std::runtime_error("ZED failed to retrieve right image"); + // Only left set is used for processing + if (mZed.retrieveImage(mGrabMeasures.leftImage, sl::VIEW::LEFT, sl::MEM::GPU, mImageResolution) != sl::ERROR_CODE::SUCCESS) + throw std::runtime_error("ZED failed to retrieve left image"); + if (mZed.retrieveMeasure(mGrabMeasures.leftPoints, sl::MEASURE::XYZ, sl::MEM::GPU, mPointResolution) != sl::ERROR_CODE::SUCCESS) + throw std::runtime_error("ZED 
failed to retrieve point cloud"); + + assert(mGrabMeasures.leftImage.timestamp == mGrabMeasures.leftPoints.timestamp); + + + mGrabMeasures.time = mSvoPath ? ros::Time::now() : slTime2Ros(mZed.getTimestamp(sl::TIME_REFERENCE::IMAGE)); + mGrabThreadProfiler.measureEvent("Retrieve"); + + // If the processing thread is busy skip + // We want this thread to run as fast as possible for grab and positional tracking + if (mSwapMutex.try_lock()) { + std::swap(mGrabMeasures, mPcMeasures); + mIsSwapReady = true; + mSwapMutex.unlock(); + mSwapCv.notify_one(); + } + mGrabThreadProfiler.measureEvent("Try swap"); + + mGrabUpdateTick++; + } + + mZed.close(); + NODELET_INFO("Grab thread finished"); + + } catch (std::exception const& e) { + NODELET_FATAL("Exception while running grab thread: %s", e.what()); + mZed.close(); + ros::shutdown(); + std::exit(EXIT_FAILURE); + } + } + + LongRangeCamNodelet::~LongRangeCamNodelet() { + NODELET_INFO("Long range cam node shutting down"); + mPointCloudThread.join(); + mGrabThread.join(); + } + + ZedNodelet::Measures::Measures(ZedNodelet::Measures&& other) noexcept { + *this = std::move(other); + } + + ZedNodelet::Measures& ZedNodelet::Measures::operator=(ZedNodelet::Measures&& other) noexcept { + sl::Mat::swap(other.leftImage, leftImage); + sl::Mat::swap(other.rightImage, rightImage); + sl::Mat::swap(other.leftPoints, leftPoints); + std::swap(time, other.time); + return *this; + } +} // namespace mrover + +int main(int argc, char** argv) { + ros::init(argc, argv, "zed_wrapper"); + + // Start the ZED Nodelet + nodelet::Loader nodelet; + nodelet.load(ros::this_node::getName(), "mrover/ZedNodelet", ros::names::getRemappings(), {}); + + ros::spin(); + + return EXIT_SUCCESS; +} + +#ifdef MROVER_IS_NODELET +#include +PLUGINLIB_EXPORT_CLASS(mrover::ZedNodelet, nodelet::Nodelet) +#endif diff --git a/src/perception/long_range_cam/long_range_cam.hpp b/src/perception/long_range_cam/long_range_cam.hpp new file mode 100644 index 000000000..e53e140f4 --- 
/dev/null +++ b/src/perception/long_range_cam/long_range_cam.hpp @@ -0,0 +1,60 @@ +#pragma once + +namespace mrover { + + class LongRangeCamNodelet : public nodelet::Nodelet { + + private: + explicit LongRangeCamNodelet; + virtual ~LongRangeCamNodelet; + LongRangeCamNodelet + ros::NodeHandle mNh, mPnh; + + ros::Publisher mCamInfoPub; + image_transport::Publisher mCamImgPub; + + PointCloudGpu mPointCloudGpu; + + sl::Resolution mImageResolution; + sl::String mSvoPath; + int mGrabTargetFps{}; + int mDepthConfidence{}; + int mTextureConfidence{}; + bool mUseBuiltinPosTracking{}; + bool mUseAreaMemory{}; + bool mUsePoseSmoothing{}; + bool mUseLoopProfiler{}; + bool mUseDepthStabilization{}; + float mDepthMaximumDistance{}; + + Measures mGrabMeasures, mPcMeasures; + + std::thread mGrabThread; + std::mutex mSwapMutex; + std::condition_variable mSwapCv; + std::atomic_bool mIsSwapReady = false; + + LoopProfiler mGrabThreadProfiler{"Long Range Cam Grab"}; + + size_t mGrabUpdateTick = 0, mPointCloudUpdateTick = 0; + + void onInit() override; + + public: + LongRangeCamNodelet() = default; + + ~LongRangeCamNodelet() override; + + void grabUpdate(); + }; + + ros::Time slTime2Ros(sl::Timestamp t); + + void fillCameraInfoMessages(sl::CalibrationParameters& calibration, sl::Resolution const& resolution, + sensor_msgs::CameraInfoPtr const& leftInfoMsg, sensor_msgs::CameraInfoPtr const& rightInfoMsg); + + void fillImageMessage(sl::Mat& bgra, sensor_msgs::ImagePtr const& msg); + + void checkCudaError(cudaError_t error); + +} // namespace mrover diff --git a/src/perception/tag_detector/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_tag_detector.cpp index 86ab376b4..cec4a48b6 100644 --- a/src/perception/tag_detector/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_tag_detector.cpp @@ -2,7 +2,7 @@ namespace mrover { - void TagDetectorNodelet::onInit() { + void LongRangeTagDetectorNodelet::onInit() { mNh = getMTNodeHandle(); mPnh = 
getMTPrivateNodeHandle(); mDetectorParams = new cv::aruco::DetectorParameters(); @@ -21,7 +21,7 @@ namespace mrover { mImgPub = mIt->advertise("long_range_tag_detection", 1); mDictionary = cv::aruco::getPredefinedDictionary(dictionaryNumber); - mPcSub = mNh.subscribe("zoom_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); + mPcSub = mNh.subscribe("long_range_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); mServiceEnableDetections = mNh.advertiseService("enable_detections", &TagDetectorNodelet::enableDetectionsCallback, this); // Lambda handles passing class pointer (implicit first parameter) to configCallback @@ -124,7 +124,7 @@ namespace mrover { mDetectorParams->polygonalApproxAccuracyRate = config.polygonalApproxAccuracyRate; } - bool TagDetectorNodelet::enableDetectionsCallback(std_srvs::SetBool::Request& req, std_srvs::SetBool::Response& res) { + bool LongRangeTagDetectorNodelet::enableDetectionsCallback(std_srvs::SetBool::Request& req, std_srvs::SetBool::Response& res) { mEnableDetections = req.data; if (mEnableDetections) { res.message = "Enabled tag detections."; @@ -141,11 +141,11 @@ namespace mrover { } // namespace mrover int main(int argc, char** argv) { - ros::init(argc, argv, "tag_detector"); + ros::init(argc, argv, "long_range_tag_detector"); - // Start the ZED Nodelet + // Start the long range cam Nodelet nodelet::Loader nodelet; - nodelet.load(ros::this_node::getName(), "mrover/TagDetectorNodelet", ros::names::getRemappings(), {}); + nodelet.load(ros::this_node::getName(), "mrover/LongRangeTagDetectorNodelet", ros::names::getRemappings(), {}); ros::spin(); @@ -153,4 +153,4 @@ int main(int argc, char** argv) { } #include -PLUGINLIB_EXPORT_CLASS(mrover::TagDetectorNodelet, nodelet::Nodelet) +PLUGINLIB_EXPORT_CLASS(mrover::LongRangeTagDetectorNodelet, nodelet::Nodelet) diff --git a/src/perception/tag_detector/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_tag_detector.hpp index 
42c3a8aee..1805c155e 100644 --- a/src/perception/tag_detector/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_tag_detector.hpp @@ -1,13 +1,5 @@ -#include namespace mrover { - typedef struct LongRangeTagType { - bool updated = false; - int id = -1; - int hitCount = 0; - cv::Point2f imageCenter{}; - } LongRangeTag; - class LongRangeTagDetectorNodelet : public nodelet::Nodelet { ros::NodeHandle mNh, mPnh; @@ -17,6 +9,9 @@ namespace mrover { ros::ServiceServer mServiceEnableDetections; image_transport::Subscriber mImgSub; + tf2_ros::Buffer mTfBuffer; + tf2_ros::TransformListener mTfListener{mTfBuffer}; + tf2_ros::TransformBroadcaster mTfBroadcaster; bool mEnableDetections = true; bool mUseOdom{}; @@ -24,109 +19,30 @@ namespace mrover { bool mPublishImages{}; // If set, we publish the images with the tags drawn on top int mMinHitCountBeforePublish{}; int mMaxHitCount{}; - int mBaseHitCount{}; //Value the tag is initialized with after first detection. (Not incremented) int mTagIncrementWeight{}; int mTagDecrementWeight{}; - int mTagRemoveWeight{}; //weight value before the tag is removed - - //ARUCO TAG DETECTION VARIABLES cv::Ptr mDetectorParams; cv::Ptr mDictionary; - //IMAGE MESSAGE VARIABLES - cv::Mat mImg; cv::Mat mGrayImg; sensor_msgs::Image mImgMsg; sensor_msgs::Image mThreshMsg; - - uint32_t mSeqNum{}; std::optional mPrevDetectedCount; // Log spam prevention - - //Raw Tag Data from CV::ARUCO std::vector> mImmediateCorners; std::vector mImmediateIds; - - //Map from tagID to Tag - std::unordered_map mTags; + std::unordered_map mTags; dynamic_reconfigure::Server mConfigServer; dynamic_reconfigure::Server::CallbackType mCallbackType; - LoopProfiler mProfiler{"Tag Detector"}; + LoopProfiler mProfiler{"Long Range Tag Detector"}; void onInit() override; void publishThresholdedImage(); - std::optional getTagInCamFromPixel(sensor_msgs::PointCloud2ConstPtr const& cloudPtr, size_t u, size_t v); - - - /** - * Detects tags in an image, draws the 
detected markers onto the image, and publishes them to /long_range_tag_detection - * 1. Updates mImg to store the underlying image matrix - * @param msg image message - */ - void imageCallback(sensor_msgs::Image const& msg); - - /** - * Stores msg to mImgMsg - * Creates image matrix from message, stores to mImg - * - * @param msg Current Image Message - */ - void updateImageMatrices(sensor_msgs::Image const& msg); - - /** - * Runs the cv::aruco tag detection on the cv::mat - * Modifies mImmedateCorners and mImmediateIds - * Uses the mDetectorParams and mDictionary member variables - */ - void runTagDetection(); - - /** - * For all the tags stored in mImmediateIds and mImmediateCorners: - * Check to see if they already exist in mTags - * If they do, increment their - */ - void updateHitCounts(); - - /** - * @see updateNewlyIdentifiedTags() - * @param tagId - the tagId of the current tag - * @param tagCorners - Reference to the mTagCorners vector of Point2fs for the current id of the lrt being created - * @return a new LongRangeTag - */ - LongRangeTag createLrt(int tagId, std::vector& tagCorners); - - /** - * @see getNormedTagCenter - * Assumes that mImg exists - * @return a cv::Point2f that contains the centerx and centery - * centerx and centery are -0.5, 0.5, and reflect a normalized offset from the image center - * Average all x values - */ - cv::Point2f static getTagCenterPixels(std::vector& tagCorners); - - /** - * Assumes upper left corner is 0, 0 - * Want to return a negative pixel offset if width less than image_width / 2 - * similarily, negative if height > image_height / 2 - * @param tagCorners reference to tag corners, passed to @see getTagCenterPixesl - * @return Point2f of pixel offset from center - */ - cv::Point2f getTagCenterOffsetPixels(std::vector& tagCorners) const; - - cv::Point2f getNormedTagCenterOffset(std::vector& tagCorners) const; - - /** - * @see updateHitCounts() - * Helper function used in updateHitCounts - * Creates a new LongRangeTag 
for the identified tags and handles - * logic of adding it to the map - * @param tagIndex the index i of the target tag in the mImmediate vectors - */ - void updateNewlyIdentifiedTags(size_t tagIndex); - }; -}; // namespace mrover \ No newline at end of file + std::optional getTagInCamFromPixel(sensor_msgs::PointCloud2ConstPtr const& cloudPtr, size_t u, size_t v); + } +} \ No newline at end of file From f05e396b86a70f2730b71d97440b14c887e4d64f Mon Sep 17 00:00:00 2001 From: ajaysum Date: Sun, 1 Oct 2023 10:09:32 -0700 Subject: [PATCH 012/126] x --- src/perception/long_range_cam/long_range_cam.hpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/perception/long_range_cam/long_range_cam.hpp b/src/perception/long_range_cam/long_range_cam.hpp index e53e140f4..4ded2cf4a 100644 --- a/src/perception/long_range_cam/long_range_cam.hpp +++ b/src/perception/long_range_cam/long_range_cam.hpp @@ -26,7 +26,10 @@ namespace mrover { bool mUseLoopProfiler{}; bool mUseDepthStabilization{}; float mDepthMaximumDistance{}; + + + Measures mGrabMeasures, mPcMeasures; std::thread mGrabThread; From 7629bcee86892a2c2aa6841f5b0be1738a948431 Mon Sep 17 00:00:00 2001 From: Marshall Vielmetti Date: Tue, 3 Oct 2023 16:00:09 -0400 Subject: [PATCH 013/126] created long_range_cam pch --- src/perception/long_range_cam/pch.hpp | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 src/perception/long_range_cam/pch.hpp diff --git a/src/perception/long_range_cam/pch.hpp b/src/perception/long_range_cam/pch.hpp new file mode 100644 index 000000000..b9dcda34c --- /dev/null +++ b/src/perception/long_range_cam/pch.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include From d5223775e954376df126f7e7e0946f9a79318ed2 Mon Sep 17 00:00:00 
2001 From: Alison Ryckman Date: Tue, 3 Oct 2023 18:00:47 -0400 Subject: [PATCH 014/126] move tag detectors into their own folders and added long range cam videoCapture --- .../long_range_cam/long_range_cam.cpp | 159 +++++++++--------- .../long_range_cam/long_range_cam.hpp | 47 +++--- .../long_range_tag_detector.cpp | 0 .../long_range_tag_detector.hpp | 0 .../long_range_tag_detector.processing.cpp | 0 .../tag_detector/long_range/pch.hpp | 31 ++++ src/perception/tag_detector/{ => zed}/pch.hpp | 0 .../tag_detector/{ => zed}/tag_detector.cpp | 0 .../tag_detector/{ => zed}/tag_detector.hpp | 0 .../{ => zed}/tag_detector.processing.cpp | 0 10 files changed, 127 insertions(+), 110 deletions(-) rename src/perception/tag_detector/{ => long_range}/long_range_tag_detector.cpp (100%) rename src/perception/tag_detector/{ => long_range}/long_range_tag_detector.hpp (100%) rename src/perception/tag_detector/{ => long_range}/long_range_tag_detector.processing.cpp (100%) create mode 100644 src/perception/tag_detector/long_range/pch.hpp rename src/perception/tag_detector/{ => zed}/pch.hpp (100%) rename src/perception/tag_detector/{ => zed}/tag_detector.cpp (100%) rename src/perception/tag_detector/{ => zed}/tag_detector.hpp (100%) rename src/perception/tag_detector/{ => zed}/tag_detector.processing.cpp (100%) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index 1a13c5cb1..bd2dce381 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -13,58 +13,73 @@ namespace mrover { // MT means multithreaded mNh = getMTNodeHandle(); mPnh = getMTPrivateNodeHandle(); - mCamInfoPub = mNh.advertise("camera/right/camera_info", 1); + mCamInfoPub = mNh.advertise("camera/camera_info", 1); image_transport::ImageTransport it{mNh}; - mRightImgPub = it.advertise("long_range_cam/image", 1); - - int imageWidth{}; - int imageHeight{}; - // TODO: edit these so they correspond to the 
long range cam's resolution - mPnh.param("image_width", imageWidth, 1280); - mPnh.param("image_height", imageHeight, 720); - - if (imageWidth < 0 || imageHeight < 0) { - throw std::invalid_argument("Invalid image dimensions"); + mCamImgPub = it.advertise("long_range_cam/image", 1); + + // TODO: Make these camera open values ROS params/not hardcoded + int deviceID = 0; + int apiID = cv::CAP_ANY; + mCapture.open(deviceID, apiID); + if (!mCapture.isOpened()) { + throw std::runtime_error("Long range cam failed to open"); } - if (mGrabTargetFps < 0) { - throw std::invalid_argument("Invalid grab target framerate"); - } - - mImageResolution = sl::Resolution(imageWidth, imageHeight); - mPointResolution = sl::Resolution(imageWidth, imageHeight); + + mReadThread = std::thread(&LongRangeCamNodelet::readUpdate, this); + mPubThread = std::thread(&LongRangeCamNodelet::imagePubUpdate, this); - NODELET_INFO("Resolution: %s image: %zux%zu points: %zux%zu", - grabResolutionString.c_str(), mImageResolution.width, mImageResolution.height); + } catch (std::exception const& e) { + NODELET_FATAL("Exception while starting: %s", e.what()); + ros::shutdown(); + } + } - sl::InitParameters initParameters; - if (mSvoPath) { - initParameters.input.setFromSVOFile(mSvoPath); - } else { - initParameters.input.setFromCameraID(-1, sl::BUS_TYPE::USB); - } - initParameters.depth_stabilization = mUseDepthStabilization; - initParameters.camera_resolution = stringToZedEnum(grabResolutionString); - initParameters.depth_mode = stringToZedEnum(depthModeString); - initParameters.coordinate_units = sl::UNIT::METER; - initParameters.sdk_verbose = true; // Log useful information - initParameters.camera_fps = mGrabTargetFps; - initParameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Z_UP_X_FWD; // Match ROS - initParameters.depth_maximum_distance = mDepthMaximumDistance; - - if (mZed.open(initParameters) != sl::ERROR_CODE::SUCCESS) { - throw std::runtime_error("ZED failed to open"); - } + /** + * 
@brief Publishes images from long range cam. + * + * Takes in the image from the cam and publishes it. + */ + void LongRangeCamNodelet::imagePubUpdate() { + try { + NODELET_INFO("Starting image pub thead"); - cudaDeviceProp prop{}; - cudaGetDeviceProperties(&prop, 0); - ROS_INFO("MP count: %d, Max threads/MP: %d, Max blocks/MP: %d, max threads/block: %d", - prop.multiProcessorCount, prop.maxThreadsPerMultiProcessor, prop.maxBlocksPerMultiProcessor, prop.maxThreadsPerBlock); + while (ros::ok()) { + mPubThreadProfiler.beginLoop(); + + // TODO: probably bad that this allocation, best case optimized by tcache + // Needed because publish directly shares the pointer to other nodelets running in this process + // TODO: What does this do? + // Swap critical section + { + std::unique_lock lock{mSwapMutex}; + // Waiting on the condition variable will drop the lock and reacquire it when the condition is met + mSwapCv.wait(lock, [this] { return mIsSwapReady.load(); }); + mIsSwapReady = false; + mPubThreadProfiler.measureEvent("Wait"); + if (mLeftImgPub.getNumSubscribers()) { + auto imgMsg = boost::make_shared(); + fillImageMessage(mPubMeasures.mImage, imgMsg); + imgMsg->header.frame_id = "long_range_cam_frame"; + imgMsg->header.stamp = mPubMeasures.time; + imgMsg->header.seq = mImagePubUpdateTick; + mImgPub.publish(ImgMsg); + + auto camInfoMsg = boost::make_shared(); + fillCameraInfoMfessages(calibration, mImageResolution, leftCamInfoMsg, rightCamInfoMsg); + rightCamInfoMsg->header.frame_id = "long_range_cam_frame"; + rightCamInfoMsg->header.stamp = mPcMeasures.time; + rightCamInfoMsg->header.seq = mImagePubUpdateTick; + mLeftCamInfoPub.publish(camInfoMsg); + } + + mPubThreadProfiler.measureEvent("Publish Message"); + } - mGrabThread = std::thread(&ZedNodelet::grabUpdate, this); + mImagePubUpdateTick++; + } + NODELET_INFO("Tag thread finished"); - } catch (std::exception const& e) { - NODELET_FATAL("Exception while starting: %s", e.what()); - ros::shutdown(); + } } } @@ -76,40 
+91,22 @@ namespace mrover { * As such we retrieve the image and point cloud on the GPU to send to the other thread for processing. * This only happens if the other thread is ready to avoid blocking, hence the try lock. */ - void ZedNodelet::grabUpdate() { + void LongRangeCam::readUpdate() { try { NODELET_INFO("Starting grab thread"); while (ros::ok()) { mGrabThreadProfiler.beginLoop(); + // TODO: Combining grab and retrieve together, is that ok? + if (!mCapture.read(mGrabMeasures, mImage)) + throw std::runtime_error("Long range cam failed to get image"); + mGrabThreadProfiler.measureEvent("Read"); - sl::RuntimeParameters runtimeParameters; - runtimeParameters.confidence_threshold = mDepthConfidence; - runtimeParameters.texture_confidence_threshold = mTextureConfidence; - - if (mZed.grab(runtimeParameters) != sl::ERROR_CODE::SUCCESS) - throw std::runtime_error("ZED failed to grab"); - mGrabThreadProfiler.measureEvent("Grab"); - - // Retrieval has to happen on the same thread as grab so that the image and point cloud are synced - if (mRightImgPub.getNumSubscribers()) - if (mZed.retrieveImage(mGrabMeasures.rightImage, sl::VIEW::RIGHT, sl::MEM::GPU, mImageResolution) != sl::ERROR_CODE::SUCCESS) - throw std::runtime_error("ZED failed to retrieve right image"); - // Only left set is used for processing - if (mZed.retrieveImage(mGrabMeasures.leftImage, sl::VIEW::LEFT, sl::MEM::GPU, mImageResolution) != sl::ERROR_CODE::SUCCESS) - throw std::runtime_error("ZED failed to retrieve left image"); - if (mZed.retrieveMeasure(mGrabMeasures.leftPoints, sl::MEASURE::XYZ, sl::MEM::GPU, mPointResolution) != sl::ERROR_CODE::SUCCESS) - throw std::runtime_error("ZED failed to retrieve point cloud"); - - assert(mGrabMeasures.leftImage.timestamp == mGrabMeasures.leftPoints.timestamp); - - - mGrabMeasures.time = mSvoPath ? 
ros::Time::now() : slTime2Ros(mZed.getTimestamp(sl::TIME_REFERENCE::IMAGE)); - mGrabThreadProfiler.measureEvent("Retrieve"); + mGrabMeasures.time = ros::Time::now(); // If the processing thread is busy skip // We want this thread to run as fast as possible for grab and positional tracking if (mSwapMutex.try_lock()) { - std::swap(mGrabMeasures, mPcMeasures); + std::swap(mGrabMeasures, mPubMeasures); mIsSwapReady = true; mSwapMutex.unlock(); mSwapCv.notify_one(); @@ -118,13 +115,10 @@ namespace mrover { mGrabUpdateTick++; } - - mZed.close(); NODELET_INFO("Grab thread finished"); } catch (std::exception const& e) { NODELET_FATAL("Exception while running grab thread: %s", e.what()); - mZed.close(); ros::shutdown(); std::exit(EXIT_FAILURE); } @@ -132,29 +126,26 @@ namespace mrover { LongRangeCamNodelet::~LongRangeCamNodelet() { NODELET_INFO("Long range cam node shutting down"); - mPointCloudThread.join(); - mGrabThread.join(); + mReadThread.join(); } - ZedNodelet::Measures::Measures(ZedNodelet::Measures&& other) noexcept { + ZedNodelet::Measures::Measures(LongRangeCamNodelet::Measures&& other) noexcept { *this = std::move(other); } - ZedNodelet::Measures& ZedNodelet::Measures::operator=(ZedNodelet::Measures&& other) noexcept { - sl::Mat::swap(other.leftImage, leftImage); - sl::Mat::swap(other.rightImage, rightImage); - sl::Mat::swap(other.leftPoints, leftPoints); + LongRangeCamNodelet::Measures& LongRangeCamNodelet::Measures::operator=(LongRangeCamNodelet::Measures&& other) noexcept { + cv::OutpuArray::swap(other.mImage, mImage); std::swap(time, other.time); return *this; } } // namespace mrover int main(int argc, char** argv) { - ros::init(argc, argv, "zed_wrapper"); + ros::init(argc, argv, "long_range_cam_nodelet"); - // Start the ZED Nodelet + // Start the Long Range Cam Nodelet nodelet::Loader nodelet; - nodelet.load(ros::this_node::getName(), "mrover/ZedNodelet", ros::names::getRemappings(), {}); + nodelet.load(ros::this_node::getName(), 
"mrover/LongRangeCamNodelet", ros::names::getRemappings(), {}); ros::spin(); @@ -163,5 +154,5 @@ int main(int argc, char** argv) { #ifdef MROVER_IS_NODELET #include -PLUGINLIB_EXPORT_CLASS(mrover::ZedNodelet, nodelet::Nodelet) +PLUGINLIB_EXPORT_CLASS(mrover::LongRangeCamNodelet, nodelet::Nodelet) #endif diff --git a/src/perception/long_range_cam/long_range_cam.hpp b/src/perception/long_range_cam/long_range_cam.hpp index e53e140f4..8bd48c2ed 100644 --- a/src/perception/long_range_cam/long_range_cam.hpp +++ b/src/perception/long_range_cam/long_range_cam.hpp @@ -5,38 +5,36 @@ namespace mrover { class LongRangeCamNodelet : public nodelet::Nodelet { private: - explicit LongRangeCamNodelet; - virtual ~LongRangeCamNodelet; - LongRangeCamNodelet - ros::NodeHandle mNh, mPnh; + cv::VideoCapture mCapture; - ros::Publisher mCamInfoPub; - image_transport::Publisher mCamImgPub; + struct Measures { + ros::Time time; + cv::OutputArray mImage; - PointCloudGpu mPointCloudGpu; + Measures() = default; - sl::Resolution mImageResolution; - sl::String mSvoPath; - int mGrabTargetFps{}; - int mDepthConfidence{}; - int mTextureConfidence{}; - bool mUseBuiltinPosTracking{}; - bool mUseAreaMemory{}; - bool mUsePoseSmoothing{}; - bool mUseLoopProfiler{}; - bool mUseDepthStabilization{}; - float mDepthMaximumDistance{}; + Measures(Measures&) = delete; + Measures& operator=(Measures&) = delete; - Measures mGrabMeasures, mPcMeasures; + Measures(Measures&&) noexcept; + Measures& operator=(Measures&&) noexcept; + }; - std::thread mGrabThread; + ros::NodeHandle mNh, mPnh; + + ros::Publisher mCamInfoPub; + image_transport::Publisher mImgPub; + + Measures mGrabMeasures, mPubMeasures; + + std::thread mReadThread; std::mutex mSwapMutex; std::condition_variable mSwapCv; std::atomic_bool mIsSwapReady = false; LoopProfiler mGrabThreadProfiler{"Long Range Cam Grab"}; - size_t mGrabUpdateTick = 0, mPointCloudUpdateTick = 0; + size_t mGrabUpdateTick = 0; void onInit() override; @@ -45,16 +43,13 @@ namespace 
mrover { ~LongRangeCamNodelet() override; - void grabUpdate(); + void readUpdate(); + void imagePubUpdate(); }; - ros::Time slTime2Ros(sl::Timestamp t); - void fillCameraInfoMessages(sl::CalibrationParameters& calibration, sl::Resolution const& resolution, sensor_msgs::CameraInfoPtr const& leftInfoMsg, sensor_msgs::CameraInfoPtr const& rightInfoMsg); void fillImageMessage(sl::Mat& bgra, sensor_msgs::ImagePtr const& msg); - void checkCudaError(cudaError_t error); - } // namespace mrover diff --git a/src/perception/tag_detector/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range/long_range_tag_detector.cpp similarity index 100% rename from src/perception/tag_detector/long_range_tag_detector.cpp rename to src/perception/tag_detector/long_range/long_range_tag_detector.cpp diff --git a/src/perception/tag_detector/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range/long_range_tag_detector.hpp similarity index 100% rename from src/perception/tag_detector/long_range_tag_detector.hpp rename to src/perception/tag_detector/long_range/long_range_tag_detector.hpp diff --git a/src/perception/tag_detector/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp similarity index 100% rename from src/perception/tag_detector/long_range_tag_detector.processing.cpp rename to src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp diff --git a/src/perception/tag_detector/long_range/pch.hpp b/src/perception/tag_detector/long_range/pch.hpp new file mode 100644 index 000000000..01f396d92 --- /dev/null +++ b/src/perception/tag_detector/long_range/pch.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include diff --git 
a/src/perception/tag_detector/pch.hpp b/src/perception/tag_detector/zed/pch.hpp similarity index 100% rename from src/perception/tag_detector/pch.hpp rename to src/perception/tag_detector/zed/pch.hpp diff --git a/src/perception/tag_detector/tag_detector.cpp b/src/perception/tag_detector/zed/tag_detector.cpp similarity index 100% rename from src/perception/tag_detector/tag_detector.cpp rename to src/perception/tag_detector/zed/tag_detector.cpp diff --git a/src/perception/tag_detector/tag_detector.hpp b/src/perception/tag_detector/zed/tag_detector.hpp similarity index 100% rename from src/perception/tag_detector/tag_detector.hpp rename to src/perception/tag_detector/zed/tag_detector.hpp diff --git a/src/perception/tag_detector/tag_detector.processing.cpp b/src/perception/tag_detector/zed/tag_detector.processing.cpp similarity index 100% rename from src/perception/tag_detector/tag_detector.processing.cpp rename to src/perception/tag_detector/zed/tag_detector.processing.cpp From 90ae8d3537354d47b11ebd68682cdcaffd7cf493 Mon Sep 17 00:00:00 2001 From: ajaysum Date: Thu, 5 Oct 2023 16:32:56 -0700 Subject: [PATCH 015/126] merged in fillImageMessage --- .../long_range_cam/long_range_cam.cpp | 186 +++++++++++------- 1 file changed, 110 insertions(+), 76 deletions(-) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index bd2dce381..ecce0bc08 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -4,6 +4,31 @@ namespace mrover { using namespace std::chrono_literals; + + cv::VideoCapture cap { + 0, + { cv::CAP_V4L2, + cv::CAP_PROP_FRAME_WIDTH, size.width, + cv::CAP_PROP_FRAME_HEIGHT, size.height, + cv::CAP_PROP_FPS, 30, + + } + void fillImageMessage(sl::Mat& bgra, sensor_msgs::ImagePtr const& msg) { + + + } + }; + void cv::VideoCapture::fillImageMessage(sl::Mat& bgra, sensor_msgs::ImagePtr const& msg) { + assert(bgra.getChannels() == 4); + 
assert(msg); + msg->height = bgra.getHeight(); + msg->width = bgra.getWidth(); + msg->encoding = sensor_msgs::image_encodings::BGRA8; + msg->step = bgra.getStepBytes(); + msg->is_bigendian = __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; + bytes_to_copy = image.total() * image.elemSize() + std::memcpy(msg->data.data(), bgra.data, bytes_to_copy) + } /** * @brief Load config, open the camera, and start our threads */ @@ -13,73 +38,58 @@ namespace mrover { // MT means multithreaded mNh = getMTNodeHandle(); mPnh = getMTPrivateNodeHandle(); - mCamInfoPub = mNh.advertise("camera/camera_info", 1); + mCamInfoPub = mNh.advertise("camera/right/camera_info", 1); image_transport::ImageTransport it{mNh}; - mCamImgPub = it.advertise("long_range_cam/image", 1); - - // TODO: Make these camera open values ROS params/not hardcoded - int deviceID = 0; - int apiID = cv::CAP_ANY; - mCapture.open(deviceID, apiID); - if (!mCapture.isOpened()) { - throw std::runtime_error("Long range cam failed to open"); - } - - mReadThread = std::thread(&LongRangeCamNodelet::readUpdate, this); - mPubThread = std::thread(&LongRangeCamNodelet::imagePubUpdate, this); + mRightImgPub = it.advertise("long_range_cam/image", 1); - } catch (std::exception const& e) { - NODELET_FATAL("Exception while starting: %s", e.what()); - ros::shutdown(); - } - } + int imageWidth{}; + int imageHeight{}; + // TODO: edit these so they correspond to the long range cam's resolution + mPnh.param("image_width", imageWidth, 1280); + mPnh.param("image_height", imageHeight, 720); - /** - * @brief Publishes images from long range cam. - * - * Takes in the image from the cam and publishes it. 
- */ - void LongRangeCamNodelet::imagePubUpdate() { - try { - NODELET_INFO("Starting image pub thead"); + if (imageWidth < 0 || imageHeight < 0) { + throw std::invalid_argument("Invalid image dimensions"); + } + if (mGrabTargetFps < 0) { + throw std::invalid_argument("Invalid grab target framerate"); + } - while (ros::ok()) { - mPubThreadProfiler.beginLoop(); - - // TODO: probably bad that this allocation, best case optimized by tcache - // Needed because publish directly shares the pointer to other nodelets running in this process - // TODO: What does this do? - // Swap critical section - { - std::unique_lock lock{mSwapMutex}; - // Waiting on the condition variable will drop the lock and reacquire it when the condition is met - mSwapCv.wait(lock, [this] { return mIsSwapReady.load(); }); - mIsSwapReady = false; - mPubThreadProfiler.measureEvent("Wait"); - if (mLeftImgPub.getNumSubscribers()) { - auto imgMsg = boost::make_shared(); - fillImageMessage(mPubMeasures.mImage, imgMsg); - imgMsg->header.frame_id = "long_range_cam_frame"; - imgMsg->header.stamp = mPubMeasures.time; - imgMsg->header.seq = mImagePubUpdateTick; - mImgPub.publish(ImgMsg); - - auto camInfoMsg = boost::make_shared(); - fillCameraInfoMfessages(calibration, mImageResolution, leftCamInfoMsg, rightCamInfoMsg); - rightCamInfoMsg->header.frame_id = "long_range_cam_frame"; - rightCamInfoMsg->header.stamp = mPcMeasures.time; - rightCamInfoMsg->header.seq = mImagePubUpdateTick; - mLeftCamInfoPub.publish(camInfoMsg); - } - - mPubThreadProfiler.measureEvent("Publish Message"); - } + mImageResolution = sl::Resolution(imageWidth, imageHeight); + mPointResolution = sl::Resolution(imageWidth, imageHeight); - mImagePubUpdateTick++; - } - NODELET_INFO("Tag thread finished"); + NODELET_INFO("Resolution: %s image: %zux%zu points: %zux%zu", + grabResolutionString.c_str(), mImageResolution.width, mImageResolution.height); + sl::InitParameters initParameters; + if (mSvoPath) { + 
initParameters.input.setFromSVOFile(mSvoPath); + } else { + initParameters.input.setFromCameraID(-1, sl::BUS_TYPE::USB); } + initParameters.depth_stabilization = mUseDepthStabilization; + initParameters.camera_resolution = stringToZedEnum(grabResolutionString); + initParameters.depth_mode = stringToZedEnum(depthModeString); + initParameters.coordinate_units = sl::UNIT::METER; + initParameters.sdk_verbose = true; // Log useful information + initParameters.camera_fps = mGrabTargetFps; + initParameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Z_UP_X_FWD; // Match ROS + initParameters.depth_maximum_distance = mDepthMaximumDistance; + + if (mZed.open(initParameters) != sl::ERROR_CODE::SUCCESS) { + throw std::runtime_error("ZED failed to open"); + } + + cudaDeviceProp prop{}; + cudaGetDeviceProperties(&prop, 0); + ROS_INFO("MP count: %d, Max threads/MP: %d, Max blocks/MP: %d, max threads/block: %d", + prop.multiProcessorCount, prop.maxThreadsPerMultiProcessor, prop.maxBlocksPerMultiProcessor, prop.maxThreadsPerBlock); + + mGrabThread = std::thread(&ZedNodelet::grabUpdate, this); + + } catch (std::exception const& e) { + NODELET_FATAL("Exception while starting: %s", e.what()); + ros::shutdown(); } } @@ -91,22 +101,40 @@ namespace mrover { * As such we retrieve the image and point cloud on the GPU to send to the other thread for processing. * This only happens if the other thread is ready to avoid blocking, hence the try lock. */ - void LongRangeCam::readUpdate() { + void ZedNodelet::grabUpdate() { try { NODELET_INFO("Starting grab thread"); while (ros::ok()) { - mGrabThreadProfiler.beginLoop(); - // TODO: Combining grab and retrieve together, is that ok? 
- if (!mCapture.read(mGrabMeasures, mImage)) - throw std::runtime_error("Long range cam failed to get image"); - mGrabThreadProfiler.measureEvent("Read"); + mGrabThreadProfiler.beginLoop(); + + sl::RuntimeParameters runtimeParameters; + runtimeParameters.confidence_threshold = mDepthConfidence; + runtimeParameters.texture_confidence_threshold = mTextureConfidence; + + if (mZed.grab(runtimeParameters) != sl::ERROR_CODE::SUCCESS) + throw std::runtime_error("ZED failed to grab"); + mGrabThreadProfiler.measureEvent("Grab"); - mGrabMeasures.time = ros::Time::now(); + // Retrieval has to happen on the same thread as grab so that the image and point cloud are synced + if (mRightImgPub.getNumSubscribers()) + if (mZed.retrieveImage(mGrabMeasures.rightImage, sl::VIEW::RIGHT, sl::MEM::GPU, mImageResolution) != sl::ERROR_CODE::SUCCESS) + throw std::runtime_error("ZED failed to retrieve right image"); + // Only left set is used for processing + if (mZed.retrieveImage(mGrabMeasures.leftImage, sl::VIEW::LEFT, sl::MEM::GPU, mImageResolution) != sl::ERROR_CODE::SUCCESS) + throw std::runtime_error("ZED failed to retrieve left image"); + if (mZed.retrieveMeasure(mGrabMeasures.leftPoints, sl::MEASURE::XYZ, sl::MEM::GPU, mPointResolution) != sl::ERROR_CODE::SUCCESS) + throw std::runtime_error("ZED failed to retrieve point cloud"); + + assert(mGrabMeasures.leftImage.timestamp == mGrabMeasures.leftPoints.timestamp); + + + mGrabMeasures.time = mSvoPath ? 
ros::Time::now() : slTime2Ros(mZed.getTimestamp(sl::TIME_REFERENCE::IMAGE)); + mGrabThreadProfiler.measureEvent("Retrieve"); // If the processing thread is busy skip // We want this thread to run as fast as possible for grab and positional tracking if (mSwapMutex.try_lock()) { - std::swap(mGrabMeasures, mPubMeasures); + std::swap(mGrabMeasures, mPcMeasures); mIsSwapReady = true; mSwapMutex.unlock(); mSwapCv.notify_one(); @@ -115,10 +143,13 @@ namespace mrover { mGrabUpdateTick++; } + + mZed.close(); NODELET_INFO("Grab thread finished"); } catch (std::exception const& e) { NODELET_FATAL("Exception while running grab thread: %s", e.what()); + mZed.close(); ros::shutdown(); std::exit(EXIT_FAILURE); } @@ -126,26 +157,29 @@ namespace mrover { LongRangeCamNodelet::~LongRangeCamNodelet() { NODELET_INFO("Long range cam node shutting down"); - mReadThread.join(); + mPointCloudThread.join(); + mGrabThread.join(); } - ZedNodelet::Measures::Measures(LongRangeCamNodelet::Measures&& other) noexcept { + ZedNodelet::Measures::Measures(ZedNodelet::Measures&& other) noexcept { *this = std::move(other); } - LongRangeCamNodelet::Measures& LongRangeCamNodelet::Measures::operator=(LongRangeCamNodelet::Measures&& other) noexcept { - cv::OutpuArray::swap(other.mImage, mImage); + ZedNodelet::Measures& ZedNodelet::Measures::operator=(ZedNodelet::Measures&& other) noexcept { + sl::Mat::swap(other.leftImage, leftImage); + sl::Mat::swap(other.rightImage, rightImage); + sl::Mat::swap(other.leftPoints, leftPoints); std::swap(time, other.time); return *this; } } // namespace mrover int main(int argc, char** argv) { - ros::init(argc, argv, "long_range_cam_nodelet"); + ros::init(argc, argv, "zed_wrapper"); - // Start the Long Range Cam Nodelet + // Start the ZED Nodelet nodelet::Loader nodelet; - nodelet.load(ros::this_node::getName(), "mrover/LongRangeCamNodelet", ros::names::getRemappings(), {}); + nodelet.load(ros::this_node::getName(), "mrover/ZedNodelet", ros::names::getRemappings(), {}); 
ros::spin(); @@ -154,5 +188,5 @@ int main(int argc, char** argv) { #ifdef MROVER_IS_NODELET #include -PLUGINLIB_EXPORT_CLASS(mrover::LongRangeCamNodelet, nodelet::Nodelet) +PLUGINLIB_EXPORT_CLASS(mrover::ZedNodelet, nodelet::Nodelet) #endif From e70dc0c7832c77547a4f389ff0ac11edc7a8ec09 Mon Sep 17 00:00:00 2001 From: John Date: Tue, 10 Oct 2023 20:11:11 -0400 Subject: [PATCH 016/126] Add cmake --- CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1ba21c323..8a0be1154 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -240,6 +240,10 @@ else () target_precompile_headers(tag_detector_nodelet PRIVATE src/perception/tag_detector/pch.hpp) target_link_libraries(tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) + add_cpp_nodelet_macro(long_range_tag_detector_nodelet src/perception/long_range_cam/*.cpp src/perception/long_range_cam) + target_precompile_headers(long_range_tag_detector_nodelet PRIVATE src/perception/long_range_cam/pch.hpp) + target_link_libraries(long_range_tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) + add_cpp_node_macro(brushed_motors "src/esw/brushed_motors/*.cpp") add_cpp_library_macro(lie "src/util/lie/*.cpp" "src/util/lie") From 836961e85f15a1a1d102d0df1aa32edc45959c37 Mon Sep 17 00:00:00 2001 From: John Date: Tue, 10 Oct 2023 20:17:56 -0400 Subject: [PATCH 017/126] Fix cmake --- CMakeLists.txt | 13 +++++++------ .../long_range_tag_detector.processing.cpp | 2 +- .../tag_detector/zed/tag_detector.processing.cpp | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b9ece4d09..a2b5dc6d4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -235,15 +235,16 @@ else () # define an add and link macro # Put items here to build macro(add_and_link_macro) - add_cpp_nodelet_macro(tag_detector_nodelet src/perception/tag_detector/*.cpp src/perception/tag_detector) - target_precompile_headers(tag_detector_nodelet PRIVATE 
src/perception/tag_detector/pch.hpp) + # add_cpp_nodelet_macro(usb_camera_nodelet src/perception/long_range_cam/*.cpp src/perception/long_range_cam) + + add_cpp_nodelet_macro(tag_detector_nodelet src/perception/tag_detector/zed/*.cpp src/perception/tag_detector/zed) + target_precompile_headers(tag_detector_nodelet PRIVATE src/perception/tag_detector/zed/pch.hpp) target_link_libraries(tag_detector_nodelet PRIVATE opencv_core opencv_objdetect opencv_aruco opencv_imgproc lie) - add_cpp_nodelet_macro(long_range_tag_detector_nodelet src/perception/long_range_cam/*.cpp src/perception/long_range_cam) - target_precompile_headers(long_range_tag_detector_nodelet PRIVATE src/perception/long_range_cam/pch.hpp) - target_link_libraries(long_range_tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) + # add_cpp_nodelet_macro(long_range_tag_detector_nodelet src/perception/long_range_cam/*.cpp src/perception/long_range_cam) + # target_precompile_headers(long_range_tag_detector_nodelet PRIVATE src/perception/long_range_cam/pch.hpp) + # target_link_libraries(long_range_tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) - add_cpp_node_macro(brushed_motors "src/esw/brushed_motors/*.cpp") add_cpp_library_macro(lie src/util/lie/*.cpp src/util/lie) add_cpp_interface_library_macro(moteus deps/moteus/lib/cpp/mjbots) diff --git a/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp index 63975f713..e14136639 100644 --- a/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp @@ -1,6 +1,6 @@ #include "long_range_tag_detector.hpp" -#include "../point.hpp" //Might not actually need?' +#include "../../point.hpp" //Might not actually need?' 
#include "mrover/LongRangeTags.h" #include diff --git a/src/perception/tag_detector/zed/tag_detector.processing.cpp b/src/perception/tag_detector/zed/tag_detector.processing.cpp index 82866f8ea..63b7ab3ad 100644 --- a/src/perception/tag_detector/zed/tag_detector.processing.cpp +++ b/src/perception/tag_detector/zed/tag_detector.processing.cpp @@ -1,6 +1,6 @@ #include "tag_detector.hpp" -#include "../point.hpp" +#include "../../point.hpp" namespace mrover { From b74bf908c19ee56677afc6fd82a136df8c725af8 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Sun, 22 Oct 2023 02:11:47 -0400 Subject: [PATCH 018/126] get camera images with gstreamer, add nodelet to cmakelists --- CMakeLists.txt | 6 +- .../long_range_cam/long_range_cam.cpp | 173 +++--------------- .../long_range_cam/long_range_cam.hpp | 24 +-- .../long_range_tag_detector.cpp | 12 +- .../long_range_tag_detector.hpp | 6 + .../long_range_tag_detector.processing.cpp | 1 - .../{long_range => long_range_cam}/pch.hpp | 0 7 files changed, 41 insertions(+), 181 deletions(-) rename src/perception/tag_detector/{long_range => long_range_cam}/long_range_tag_detector.cpp (92%) rename src/perception/tag_detector/{long_range => long_range_cam}/long_range_tag_detector.hpp (95%) rename src/perception/tag_detector/{long_range => long_range_cam}/long_range_tag_detector.processing.cpp (99%) rename src/perception/tag_detector/{long_range => long_range_cam}/pch.hpp (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index a2b5dc6d4..7cccf14f5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -241,9 +241,9 @@ else () target_precompile_headers(tag_detector_nodelet PRIVATE src/perception/tag_detector/zed/pch.hpp) target_link_libraries(tag_detector_nodelet PRIVATE opencv_core opencv_objdetect opencv_aruco opencv_imgproc lie) - # add_cpp_nodelet_macro(long_range_tag_detector_nodelet src/perception/long_range_cam/*.cpp src/perception/long_range_cam) - # target_precompile_headers(long_range_tag_detector_nodelet PRIVATE 
src/perception/long_range_cam/pch.hpp) - # target_link_libraries(long_range_tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) + add_cpp_nodelet_macro(long_range_tag_detector_nodelet src/perception/tag_detector/long_range_cam/*.cpp src/perception/tag_detector/long_range_cam) + target_precompile_headers(long_range_tag_detector_nodelet PRIVATE src/perception/tag_detector/long_range_cam/pch.hpp) + target_link_libraries(long_range_tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) add_cpp_library_macro(lie src/util/lie/*.cpp src/util/lie) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index ecce0bc08..857e3c2a8 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -1,34 +1,14 @@ #include "long_range_cam.hpp" +#include +#include +#include namespace mrover { using namespace std::chrono_literals; - - cv::VideoCapture cap { - 0, - { cv::CAP_V4L2, - cv::CAP_PROP_FRAME_WIDTH, size.width, - cv::CAP_PROP_FRAME_HEIGHT, size.height, - cv::CAP_PROP_FPS, 30, - - } - void fillImageMessage(sl::Mat& bgra, sensor_msgs::ImagePtr const& msg) { - - - } - }; - void cv::VideoCapture::fillImageMessage(sl::Mat& bgra, sensor_msgs::ImagePtr const& msg) { - assert(bgra.getChannels() == 4); - assert(msg); - msg->height = bgra.getHeight(); - msg->width = bgra.getWidth(); - msg->encoding = sensor_msgs::image_encodings::BGRA8; - msg->step = bgra.getStepBytes(); - msg->is_bigendian = __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; - bytes_to_copy = image.total() * image.elemSize() - std::memcpy(msg->data.data(), bgra.data, bytes_to_copy) - } + // gst-launch-1.0 -e -v v4l2src device=/dev/video4 ! videoconvert ! videoscale ! videorate ! "video/x-raw,format=I420,width=640,height=480,framerate=30/1" ! x264enc key-int-max=15 ! rtph264pay ! udpsink host=localhost port=5000 + // gst-launch-1.0 -v udpsrc uri=udp://localhost:5000 ! 
application/x-rtp,media=video,clock-rate=90000,encoding-name=H264,payload=96 ! rtph264depay ! avdec_h264 ! autovideosink /** * @brief Load config, open the camera, and start our threads */ @@ -38,148 +18,47 @@ namespace mrover { // MT means multithreaded mNh = getMTNodeHandle(); mPnh = getMTPrivateNodeHandle(); - mCamInfoPub = mNh.advertise("camera/right/camera_info", 1); + mCamInfoPub = mNh.advertise("long_range_cam/camera_info", 1); image_transport::ImageTransport it{mNh}; - mRightImgPub = it.advertise("long_range_cam/image", 1); - - int imageWidth{}; - int imageHeight{}; - // TODO: edit these so they correspond to the long range cam's resolution - mPnh.param("image_width", imageWidth, 1280); - mPnh.param("image_height", imageHeight, 720); - - if (imageWidth < 0 || imageHeight < 0) { - throw std::invalid_argument("Invalid image dimensions"); - } - if (mGrabTargetFps < 0) { - throw std::invalid_argument("Invalid grab target framerate"); - } - - mImageResolution = sl::Resolution(imageWidth, imageHeight); - mPointResolution = sl::Resolution(imageWidth, imageHeight); + mImgPub = it.advertise("long_range_cam/image", 1); - NODELET_INFO("Resolution: %s image: %zux%zu points: %zux%zu", - grabResolutionString.c_str(), mImageResolution.width, mImageResolution.height); - - sl::InitParameters initParameters; - if (mSvoPath) { - initParameters.input.setFromSVOFile(mSvoPath); - } else { - initParameters.input.setFromCameraID(-1, sl::BUS_TYPE::USB); - } - initParameters.depth_stabilization = mUseDepthStabilization; - initParameters.camera_resolution = stringToZedEnum(grabResolutionString); - initParameters.depth_mode = stringToZedEnum(depthModeString); - initParameters.coordinate_units = sl::UNIT::METER; - initParameters.sdk_verbose = true; // Log useful information - initParameters.camera_fps = mGrabTargetFps; - initParameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Z_UP_X_FWD; // Match ROS - initParameters.depth_maximum_distance = mDepthMaximumDistance; - - if 
(mZed.open(initParameters) != sl::ERROR_CODE::SUCCESS) { - throw std::runtime_error("ZED failed to open"); + // TODO: Fix hardcoded components of this string + mCapture = cv::VideoCapture("v4l2src device=/dev/video4 ! videoconvert ! videoscale ! videorate ! 'video / x - raw, format = I420, width = 640, height = 480, framerate = 30 / 1' ! x264enc key-int-max=15 ! rtph264pay ! udpsink"); + cv::VideoWriter out = cv::VideoWriter("udpsrc ! application/x-rtp,media=video,clock-rate=90000,encoding-name=H264,payload=96 ! rtph264depay ! avdec_h264 ! autovideosink host=localhost port =5000", cv::CAP_GSTREAMER, 0, 30, cv::Size(640, 480), true); + if (!mCapture.isOpened()) { + throw std::runtime_error("Long range cam failed to open"); } - - cudaDeviceProp prop{}; - cudaGetDeviceProperties(&prop, 0); - ROS_INFO("MP count: %d, Max threads/MP: %d, Max blocks/MP: %d, max threads/block: %d", - prop.multiProcessorCount, prop.maxThreadsPerMultiProcessor, prop.maxBlocksPerMultiProcessor, prop.maxThreadsPerBlock); - - mGrabThread = std::thread(&ZedNodelet::grabUpdate, this); - - } catch (std::exception const& e) { - NODELET_FATAL("Exception while starting: %s", e.what()); - ros::shutdown(); - } - } - - /** - * @brief Grabs measures from the ZED. - * - * This update loop needs to happen as fast as possible. - * grab() on the ZED updates positional tracking (visual odometry) which works best at high update rates. - * As such we retrieve the image and point cloud on the GPU to send to the other thread for processing. - * This only happens if the other thread is ready to avoid blocking, hence the try lock. 
- */ - void ZedNodelet::grabUpdate() { - try { - NODELET_INFO("Starting grab thread"); - while (ros::ok()) { - mGrabThreadProfiler.beginLoop(); - - sl::RuntimeParameters runtimeParameters; - runtimeParameters.confidence_threshold = mDepthConfidence; - runtimeParameters.texture_confidence_threshold = mTextureConfidence; - - if (mZed.grab(runtimeParameters) != sl::ERROR_CODE::SUCCESS) - throw std::runtime_error("ZED failed to grab"); - mGrabThreadProfiler.measureEvent("Grab"); - - // Retrieval has to happen on the same thread as grab so that the image and point cloud are synced - if (mRightImgPub.getNumSubscribers()) - if (mZed.retrieveImage(mGrabMeasures.rightImage, sl::VIEW::RIGHT, sl::MEM::GPU, mImageResolution) != sl::ERROR_CODE::SUCCESS) - throw std::runtime_error("ZED failed to retrieve right image"); - // Only left set is used for processing - if (mZed.retrieveImage(mGrabMeasures.leftImage, sl::VIEW::LEFT, sl::MEM::GPU, mImageResolution) != sl::ERROR_CODE::SUCCESS) - throw std::runtime_error("ZED failed to retrieve left image"); - if (mZed.retrieveMeasure(mGrabMeasures.leftPoints, sl::MEASURE::XYZ, sl::MEM::GPU, mPointResolution) != sl::ERROR_CODE::SUCCESS) - throw std::runtime_error("ZED failed to retrieve point cloud"); - - assert(mGrabMeasures.leftImage.timestamp == mGrabMeasures.leftPoints.timestamp); - - - mGrabMeasures.time = mSvoPath ? 
ros::Time::now() : slTime2Ros(mZed.getTimestamp(sl::TIME_REFERENCE::IMAGE)); - mGrabThreadProfiler.measureEvent("Retrieve"); - - // If the processing thread is busy skip - // We want this thread to run as fast as possible for grab and positional tracking - if (mSwapMutex.try_lock()) { - std::swap(mGrabMeasures, mPcMeasures); - mIsSwapReady = true; - mSwapMutex.unlock(); - mSwapCv.notify_one(); + ROS_ERROR("NODELET LOADED"); + cv::Mat frame; + while (true) { + mCapture.read(frame); + if (frame.empty()) { + break; + } + out.write(frame); + cv::imshow("Sender", frame); + if (cv::waitKey(1) == 's') { + break; } - mGrabThreadProfiler.measureEvent("Try swap"); - - mGrabUpdateTick++; } - - mZed.close(); - NODELET_INFO("Grab thread finished"); - } catch (std::exception const& e) { - NODELET_FATAL("Exception while running grab thread: %s", e.what()); - mZed.close(); + NODELET_FATAL("Exception while starting: %s", e.what()); ros::shutdown(); - std::exit(EXIT_FAILURE); } } LongRangeCamNodelet::~LongRangeCamNodelet() { NODELET_INFO("Long range cam node shutting down"); - mPointCloudThread.join(); - mGrabThread.join(); } - ZedNodelet::Measures::Measures(ZedNodelet::Measures&& other) noexcept { - *this = std::move(other); - } - - ZedNodelet::Measures& ZedNodelet::Measures::operator=(ZedNodelet::Measures&& other) noexcept { - sl::Mat::swap(other.leftImage, leftImage); - sl::Mat::swap(other.rightImage, rightImage); - sl::Mat::swap(other.leftPoints, leftPoints); - std::swap(time, other.time); - return *this; - } } // namespace mrover int main(int argc, char** argv) { - ros::init(argc, argv, "zed_wrapper"); + ros::init(argc, argv, "long_range_cam"); // Start the ZED Nodelet nodelet::Loader nodelet; - nodelet.load(ros::this_node::getName(), "mrover/ZedNodelet", ros::names::getRemappings(), {}); + nodelet.load(ros::this_node::getName(), "mrover/LongRangeCamNodelet", ros::names::getRemappings(), {}); ros::spin(); diff --git a/src/perception/long_range_cam/long_range_cam.hpp 
b/src/perception/long_range_cam/long_range_cam.hpp index 8bd48c2ed..e2f776fc9 100644 --- a/src/perception/long_range_cam/long_range_cam.hpp +++ b/src/perception/long_range_cam/long_range_cam.hpp @@ -3,33 +3,18 @@ namespace mrover { class LongRangeCamNodelet : public nodelet::Nodelet { - + private: cv::VideoCapture mCapture; - struct Measures { - ros::Time time; - cv::OutputArray mImage; - - Measures() = default; - - Measures(Measures&) = delete; - Measures& operator=(Measures&) = delete; - - Measures(Measures&&) noexcept; - Measures& operator=(Measures&&) noexcept; - }; - ros::NodeHandle mNh, mPnh; ros::Publisher mCamInfoPub; image_transport::Publisher mImgPub; - Measures mGrabMeasures, mPubMeasures; - std::thread mReadThread; std::mutex mSwapMutex; - std::condition_variable mSwapCv; + boost::condition_variable mSwapCv; std::atomic_bool mIsSwapReady = false; LoopProfiler mGrabThreadProfiler{"Long Range Cam Grab"}; @@ -47,9 +32,4 @@ namespace mrover { void imagePubUpdate(); }; - void fillCameraInfoMessages(sl::CalibrationParameters& calibration, sl::Resolution const& resolution, - sensor_msgs::CameraInfoPtr const& leftInfoMsg, sensor_msgs::CameraInfoPtr const& rightInfoMsg); - - void fillImageMessage(sl::Mat& bgra, sensor_msgs::ImagePtr const& msg); - } // namespace mrover diff --git a/src/perception/tag_detector/long_range/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp similarity index 92% rename from src/perception/tag_detector/long_range/long_range_tag_detector.cpp rename to src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index cec4a48b6..3159de9e5 100644 --- a/src/perception/tag_detector/long_range/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -1,4 +1,4 @@ -#include "tag_detector.hpp" +#include "long_range_tag_detector.hpp" namespace mrover { @@ -6,12 +6,10 @@ namespace mrover { mNh = getMTNodeHandle(); mPnh = 
getMTPrivateNodeHandle(); mDetectorParams = new cv::aruco::DetectorParameters(); - auto defaultDetectorParams = cv::aruco::DetectorParameters::create(); + auto defaultDetectorParams = cv::makePtr(); int dictionaryNumber; mPnh.param("publish_images", mPublishImages, true); - using DictEnumType = std::underlying_type_t; - mPnh.param("dictionary", dictionaryNumber, static_cast(cv::aruco::DICT_4X4_50)); mPnh.param("min_hit_count_before_publish", mMinHitCountBeforePublish, 5); mPnh.param("max_hit_count", mMaxHitCount, 5); mPnh.param("tag_increment_weight", mTagIncrementWeight, 2); @@ -19,10 +17,8 @@ namespace mrover { mIt.emplace(mNh); mImgPub = mIt->advertise("long_range_tag_detection", 1); - mDictionary = cv::aruco::getPredefinedDictionary(dictionaryNumber); - mPcSub = mNh.subscribe("long_range_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); - mServiceEnableDetections = mNh.advertiseService("enable_detections", &TagDetectorNodelet::enableDetectionsCallback, this); + mServiceEnableDetections = mNh.advertiseService("enable_detections", &LongRangeTagDetectorNodelet::enableDetectionsCallback, this); // Lambda handles passing class pointer (implicit first parameter) to configCallback mCallbackType = [this](mrover::DetectorParamsConfig& config, uint32_t level) { configCallback(config, level); }; @@ -90,7 +86,7 @@ namespace mrover { NODELET_INFO("Tag detection ready, use odom frame: %s, min hit count: %d, max hit count: %d, hit increment weight: %d, hit decrement weight: %d", mUseOdom ? 
"true" : "false", mMinHitCountBeforePublish, mMaxHitCount, mTagIncrementWeight, mTagDecrementWeight); } - void TagDetectorNodelet::configCallback(mrover::DetectorParamsConfig& config, uint32_t level) { + void LongRangeTagDetectorNodelet::configCallback(mrover::DetectorParamsConfig& config, uint32_t level) { // Don't load initial config, since it will overwrite the rosparam settings if (level == std::numeric_limits::max()) return; diff --git a/src/perception/tag_detector/long_range/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp similarity index 95% rename from src/perception/tag_detector/long_range/long_range_tag_detector.hpp rename to src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp index 4dc90f58e..d30016efd 100644 --- a/src/perception/tag_detector/long_range/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp @@ -29,9 +29,11 @@ namespace mrover { //Constatns set in ROS params for num hits needed to publish int mMinHitCountBeforePublish{}; + int mBaseHitCount{}; int mMaxHitCount{}; int mTagIncrementWeight{}; int mTagDecrementWeight{}; + int mTagRemoveWeight{}; cv::Ptr mDetectorParams; cv::Ptr mDictionary; @@ -142,5 +144,9 @@ namespace mrover { * only if mPublishImages and the topic has a subscriber */ void publishTagsOnImage(); + + void configCallback(mrover::DetectorParamsConfig& config, uint32_t level); + + bool enableDetectionsCallback(std_srvs::SetBool::Request& req, std_srvs::SetBool::Response& res); }; }; // namespace mrover diff --git a/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp similarity index 99% rename from src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp rename to src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index e14136639..6300f4780 100644 
--- a/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -1,6 +1,5 @@ #include "long_range_tag_detector.hpp" -#include "../../point.hpp" //Might not actually need?' #include "mrover/LongRangeTags.h" #include diff --git a/src/perception/tag_detector/long_range/pch.hpp b/src/perception/tag_detector/long_range_cam/pch.hpp similarity index 100% rename from src/perception/tag_detector/long_range/pch.hpp rename to src/perception/tag_detector/long_range_cam/pch.hpp From a8a814779d3f03d2fe084d90280cbf7ba4859033 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Tue, 24 Oct 2023 12:04:18 -0400 Subject: [PATCH 019/126] add bearing function defs --- CMakeLists.txt | 8 ++++++-- .../long_range/long_range_tag_detector.cpp | 4 ++-- .../long_range/long_range_tag_detector.hpp | 13 +++++++++++-- .../long_range_tag_detector.processing.cpp | 8 +++++++- .../tag_detector/zed/tag_detector.processing.cpp | 2 +- 5 files changed, 27 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1ba21c323..5dec0c05c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -236,10 +236,14 @@ else () # define an add and link macro # Put items here to build macro(add_and_link_macro) - add_cpp_nodelet_macro(tag_detector_nodelet "src/perception/tag_detector/*.cpp" "src/perception/tag_detector") - target_precompile_headers(tag_detector_nodelet PRIVATE src/perception/tag_detector/pch.hpp) + add_cpp_nodelet_macro(tag_detector_nodelet "src/perception/tag_detector/zed/*.cpp" "src/perception/tag_detector/zed") + target_precompile_headers(tag_detector_nodelet PRIVATE src/perception/tag_detector/zed/pch.hpp) target_link_libraries(tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) + add_cpp_nodelet_macro(long_range_tag_detector_nodelet "src/perception/tag_detector/long_range/*.cpp" "src/perception/tag_detector/long_range") + target_precompile_headers(tag_detector_nodelet 
PRIVATE src/perception/tag_detector/long_range/pch.hpp) + target_link_libraries(long_range_tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) + add_cpp_node_macro(brushed_motors "src/esw/brushed_motors/*.cpp") add_cpp_library_macro(lie "src/util/lie/*.cpp" "src/util/lie") diff --git a/src/perception/tag_detector/long_range/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range/long_range_tag_detector.cpp index cec4a48b6..6b2eececd 100644 --- a/src/perception/tag_detector/long_range/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range/long_range_tag_detector.cpp @@ -1,4 +1,4 @@ -#include "tag_detector.hpp" +#include "long_range_tag_detector.hpp" namespace mrover { @@ -90,7 +90,7 @@ namespace mrover { NODELET_INFO("Tag detection ready, use odom frame: %s, min hit count: %d, max hit count: %d, hit increment weight: %d, hit decrement weight: %d", mUseOdom ? "true" : "false", mMinHitCountBeforePublish, mMaxHitCount, mTagIncrementWeight, mTagDecrementWeight); } - void TagDetectorNodelet::configCallback(mrover::DetectorParamsConfig& config, uint32_t level) { + void LongRangeTagDetectorNodelet::configCallback(mrover::DetectorParamsConfig& config, uint32_t level) { // Don't load initial config, since it will overwrite the rosparam settings if (level == std::numeric_limits::max()) return; diff --git a/src/perception/tag_detector/long_range/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range/long_range_tag_detector.hpp index 4dc90f58e..ae5a7a77f 100644 --- a/src/perception/tag_detector/long_range/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range/long_range_tag_detector.hpp @@ -1,3 +1,4 @@ +#include "pch.hpp" #include "mrover/LongRangeTag.h" #include "mrover/LongRangeTags.h" @@ -96,10 +97,10 @@ namespace mrover { /** * @see updateNewlyIdentifiedTags() * @param tagId - the tagId of the current tag - * @param tagCorners - Reference to the mTagCorners vector of Point2fs for the current id of the lrt 
being created + * @param tag_bearing - Bearing of the current tag * @return a new LongRangeTag */ - LongRangeTagStruct createLrt(int tagId, std::vector& tagCorners); + LongRangeTagStruct createLrt(int tagId, float bearing); /** * @see getNormedTagCenter @@ -121,6 +122,14 @@ namespace mrover { cv::Point2f getNormedTagCenterOffset(std::vector& tagCorners) const; + /** + * Given the known tag information and the ZED FOV (hard code for now, we'll + * assign params later), calculate relative bearing of a detected tag + * @param tagCorners reference to tag corners, passed to @see getTagCenterPixels + * @return float of tag bearing + */ + float getTagBearing(std::vector& tagCorners) const; + /** * @see updateHitCounts() * Helper function used in updateHitCounts diff --git a/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp index 63975f713..4b69cd108 100644 --- a/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range/long_range_tag_detector.processing.cpp @@ -1,6 +1,5 @@ #include "long_range_tag_detector.hpp" -#include "../point.hpp" //Might not actually need?' #include "mrover/LongRangeTags.h" #include @@ -90,6 +89,7 @@ namespace mrover { } LongRangeTagStruct LongRangeTagDetectorNodelet::createLrt(int tagId, std::vector& tagCorners) { + //TODO: Modify the LRT struct and this function to assign a bearing instead of an imageCenter LongRangeTagStruct lrt; lrt.hitCount = mBaseHitCount; //Start at base hit count value @@ -155,6 +155,12 @@ namespace mrover { return offsetCenterPoint; } + float getTagBearing(std::vector& tagCorners) const { + //TODO: Implement me! 
+ + return {}; + } + void LongRangeTagDetectorNodelet::publishThresholdTags() { //Loop through all the tags LongRangeTags tagsMessage; // diff --git a/src/perception/tag_detector/zed/tag_detector.processing.cpp b/src/perception/tag_detector/zed/tag_detector.processing.cpp index b1033d61d..4dc8649bf 100644 --- a/src/perception/tag_detector/zed/tag_detector.processing.cpp +++ b/src/perception/tag_detector/zed/tag_detector.processing.cpp @@ -1,6 +1,6 @@ #include "tag_detector.hpp" -#include "../point.hpp" +#include "../../point.hpp" namespace mrover { From 488d9f70cde2ab69ee8c407a682e3ba23db6c6d2 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Wed, 25 Oct 2023 15:31:47 -0400 Subject: [PATCH 020/126] fix cmake and bearing starter code --- CMakeLists.txt | 14 ++++++-------- .../long_range_cam/long_range_tag_detector.hpp | 4 ++-- .../long_range_tag_detector.processing.cpp | 2 +- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1a1d0398c..66adc8ee6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -235,20 +235,18 @@ else () # define an add and link macro # Put items here to build macro(add_and_link_macro) - add_cpp_nodelet_macro(tag_detector_nodelet "src/perception/tag_detector/zed/*.cpp" src/perception/tag_detector/zed) + add_cpp_nodelet_macro(tag_detector_nodelet src/perception/tag_detector/zed/*.cpp src/perception/tag_detector/zed) target_precompile_headers(tag_detector_nodelet PRIVATE src/perception/tag_detector/zed/pch.hpp) target_link_libraries(tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} opencv_core opencv_objdetect opencv_aruco opencv_imgproc lie) - add_cpp_nodelet_macro(long_range_tag_detector_nodelet "src/perception/tag_detector/long_range/*.cpp" src/perception/tag_detector/long_range) - target_precompile_headers(tag_detector_nodelet PRIVATE src/perception/tag_detector/long_range/pch.hpp) - target_link_libraries(long_range_tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} opencv_core 
opencv_objdetect opencv_aruco opencv_imgproc lie) - - add_cpp_node_macro(brushed_motors "src/esw/brushed_motors/*.cpp") - add_cpp_nodelet_macro(long_range_tag_detector_nodelet src/perception/tag_detector/long_range_cam/*.cpp src/perception/tag_detector/long_range_cam) - target_precompile_headers(long_range_tag_detector_nodelet PRIVATE src/perception/tag_detector/long_range_cam/pch.hpp) + target_precompile_headers(tag_detector_nodelet PRIVATE src/perception/tag_detector/long_range_cam/pch.hpp) target_link_libraries(long_range_tag_detector_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) + add_cpp_nodelet_macro(long_range_cam_nodelet src/perception/long_range_cam/*.cpp src/perception/long_range_cam) + target_precompile_headers(long_range_cam_nodelet PRIVATE src/perception/long_range_cam/pch.hpp) + target_link_libraries(long_range_cam_nodelet PRIVATE ${OpenCV_LIBRARIES} lie) + add_cpp_library_macro(lie src/util/lie/*.cpp src/util/lie) add_cpp_interface_library_macro(moteus deps/moteus/lib/cpp/mjbots) diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp index 28af9b33d..0278b8efb 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp @@ -1,6 +1,6 @@ -#include "pch.hpp" #include "mrover/LongRangeTag.h" #include "mrover/LongRangeTags.h" +#include "pch.hpp" #include #include @@ -102,7 +102,7 @@ namespace mrover { * @param tag_bearing - Bearing of the current tag * @return a new LongRangeTag */ - LongRangeTagStruct createLrt(int tagId, float bearing); + LongRangeTagStruct createLrt(int tagId, std::vector& tagCorners); /** * @see getNormedTagCenter diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index 4b69cd108..44f71678d 100644 --- 
a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -155,7 +155,7 @@ namespace mrover { return offsetCenterPoint; } - float getTagBearing(std::vector& tagCorners) const { + float LongRangeTagDetectorNodelet::getTagBearing(std::vector& tagCorners) const { //TODO: Implement me! return {}; From d3c6d5028d1892f0cf6d05d75d89e201f76feae8 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Wed, 25 Oct 2023 22:04:23 -0400 Subject: [PATCH 021/126] add nodelet plugins --- .gitignore | 1 - package.xml | 9 ++++++--- plugins/long_range_cam_plugin.xml | 6 ++++++ plugins/long_range_tag_detector_plugin.xml | 6 ++++++ plugins/tag_detector_plugin.xml | 6 +++--- src/perception/long_range_cam/long_range_cam.cpp | 4 +--- src/perception/long_range_cam/long_range_cam.hpp | 2 ++ 7 files changed, 24 insertions(+), 10 deletions(-) create mode 100644 plugins/long_range_cam_plugin.xml create mode 100644 plugins/long_range_tag_detector_plugin.xml diff --git a/.gitignore b/.gitignore index 0f4fb421a..c12791555 100644 --- a/.gitignore +++ b/.gitignore @@ -42,5 +42,4 @@ RemoteSystemsTempFiles/ *.chm *.settings.xml -*.xml *.launch diff --git a/package.xml b/package.xml index 87ca4865b..8b6a46a9c 100644 --- a/package.xml +++ b/package.xml @@ -87,7 +87,10 @@ - - + + + + + - + \ No newline at end of file diff --git a/plugins/long_range_cam_plugin.xml b/plugins/long_range_cam_plugin.xml new file mode 100644 index 000000000..e717701de --- /dev/null +++ b/plugins/long_range_cam_plugin.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/plugins/long_range_tag_detector_plugin.xml b/plugins/long_range_tag_detector_plugin.xml new file mode 100644 index 000000000..6a7d82109 --- /dev/null +++ b/plugins/long_range_tag_detector_plugin.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/plugins/tag_detector_plugin.xml b/plugins/tag_detector_plugin.xml 
index 7f9242490..bd6238412 100644 --- a/plugins/tag_detector_plugin.xml +++ b/plugins/tag_detector_plugin.xml @@ -1,6 +1,6 @@ + type="mrover::TagDetectorNodelet" + base_class_type="nodelet::Nodelet"> - + \ No newline at end of file diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index 857e3c2a8..25f3de4ec 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -65,7 +65,5 @@ int main(int argc, char** argv) { return EXIT_SUCCESS; } -#ifdef MROVER_IS_NODELET #include -PLUGINLIB_EXPORT_CLASS(mrover::ZedNodelet, nodelet::Nodelet) -#endif +PLUGINLIB_EXPORT_CLASS(mrover::LongRangeCamNodelet, nodelet::Nodelet) diff --git a/src/perception/long_range_cam/long_range_cam.hpp b/src/perception/long_range_cam/long_range_cam.hpp index e2f776fc9..6c63faaa3 100644 --- a/src/perception/long_range_cam/long_range_cam.hpp +++ b/src/perception/long_range_cam/long_range_cam.hpp @@ -1,5 +1,7 @@ #pragma once +#include "pch.hpp" + namespace mrover { class LongRangeCamNodelet : public nodelet::Nodelet { From 3d5e10be228964ddd9b45a8862d2be7c6340d8be Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Mon, 30 Oct 2023 12:07:55 -0400 Subject: [PATCH 022/126] working greyscale stream with ros --- package.xml | 2 +- ...e_cam_plugin.xml => usb_camera_plugin.xml} | 2 +- .../long_range_cam/long_range_cam.cpp | 75 ++++++++++++++----- .../long_range_cam/long_range_cam.hpp | 7 +- 4 files changed, 64 insertions(+), 22 deletions(-) rename plugins/{long_range_cam_plugin.xml => usb_camera_plugin.xml} (76%) diff --git a/package.xml b/package.xml index 8b6a46a9c..3c865b925 100644 --- a/package.xml +++ b/package.xml @@ -90,7 +90,7 @@ - + \ No newline at end of file diff --git a/plugins/long_range_cam_plugin.xml b/plugins/usb_camera_plugin.xml similarity index 76% rename from plugins/long_range_cam_plugin.xml rename to plugins/usb_camera_plugin.xml index e717701de..98d1f99e2 
100644 --- a/plugins/long_range_cam_plugin.xml +++ b/plugins/usb_camera_plugin.xml @@ -1,4 +1,4 @@ - + diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index c05cd8e3e..ab8a1fdfa 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -1,45 +1,83 @@ #include "long_range_cam.hpp" +#include +#include +#include +#include +#include +#include namespace mrover { using namespace std::chrono_literals; + // gst-launch-1.0 -e -v v4l2src device=/dev/video4 ! videoconvert ! videoscale ! videorate ! "video/x-raw,format=I420,width=640,height=480,framerate=30/1" ! x264enc key-int-max=15 ! rtph264pay ! udpsink host=localhost port=5000 // gst-launch-1.0 -v udpsrc uri=udp://localhost:5000 ! application/x-rtp,media=video,clock-rate=90000,encoding-name=H264,payload=96 ! rtph264depay ! avdec_h264 ! autovideosink /** - * @brief Load config, open the camera, and start our threads - */ + * @brief Load config, open the camera, and start our threads + */ void LongRangeCamNodelet::onInit() { try { - // See: http://wiki.ros.org/roscpp/Overview/NodeHandles for public vs. private node handle - // MT means multithreaded - mNh = getMTNodeHandle(); - mPnh = getMTPrivateNodeHandle(); - mCamInfoPub = mNh.advertise("long_range_cam/camera_info", 1); - mImgPub = mNh.advertise("long_range_cam/image", 1); + mGrabThread = std::jthread(&LongRangeCamNodelet::grabUpdate, this); + } catch (std::exception const& e) { + NODELET_FATAL("Exception while starting: %s", e.what()); + ros::shutdown(); + } + } + + void fillImageMessage(cv::Mat& grey, sensor_msgs::ImagePtr const& msg) { + // TODO: Uncomment this + // assert(img.channels() == 4); + assert(msg); - // TODO: Fix hardcoded components of this string - mCapture = cv::VideoCapture("v4l2src device=/dev/video4 ! videoconvert ! videoscale ! videorate ! 'video / x - raw, format = I420, width = 640, height = 480, framerate = 30 / 1' ! 
x264enc key-int-max=15 ! rtph264pay ! udpsink"); - cv::VideoWriter out = cv::VideoWriter("udpsrc ! application/x-rtp,media=video,clock-rate=90000,encoding-name=H264,payload=96 ! rtph264depay ! avdec_h264 ! autovideosink host=localhost port =5000", cv::CAP_GSTREAMER, 0, 30, cv::Size(640, 480), true); + msg->height = grey.rows; + msg->width = grey.cols; + msg->encoding = sensor_msgs::image_encodings::MONO8; + msg->step = grey.step[0]; + msg->is_bigendian = __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; + auto* imgPtr = grey.ptr(0); + size_t size = msg->step * msg->height; + msg->data.resize(size); + std::cout << "data size " << msg->data.size() << std::endl; + std::cout << "size obj " << size << std::endl; + memcpy(msg->data.data(), imgPtr, size); + } + + void LongRangeCamNodelet::grabUpdate() { + // See: http://wiki.ros.org/roscpp/Overview/NodeHandles for public vs. private node handle + // MT means multithreaded + mNh = getMTNodeHandle(); + mPnh = getMTPrivateNodeHandle(); + mCamInfoPub = mNh.advertise("long_range_cam/camera_info", 1); + mImgPub = mNh.advertise("long_range_cam/image", 1); + // While dtor not called + while (ros::ok()) { + cv::VideoCapture mCapture{std::format("v4l2src device=/dev/video4 ! videoconvert ! video/x-raw,width={},height={},format=I420,framerate={}/1 ! 
appsink", 1920, 1080, 5), cv::CAP_GSTREAMER}; if (!mCapture.isOpened()) { throw std::runtime_error("Long range cam failed to open"); } - ROS_ERROR("NODELET LOADED"); cv::Mat frame; while (true) { mCapture.read(frame); if (frame.empty()) { break; } - out.write(frame); cv::imshow("Sender", frame); + if (mImgPub.getNumSubscribers()) { + auto imgMsg = boost::make_shared(); + cv::Mat grey; + cv::cvtColor(frame, grey, cv::COLOR_YUV2GRAY_I420); + fillImageMessage(grey, imgMsg); + imgMsg->header.frame_id = "long_range_cam_frame"; + imgMsg->header.stamp = ros::Time::now(); + imgMsg->header.seq = mGrabUpdateTick; + mImgPub.publish(imgMsg); + } if (cv::waitKey(1) == 's') { break; } + ++mGrabUpdateTick; } - } catch (std::exception const& e) { - NODELET_FATAL("Exception while starting: %s", e.what()); - ros::shutdown(); } } @@ -50,10 +88,11 @@ namespace mrover { } // namespace mrover int main(int argc, char** argv) { - ros::init(argc, argv, "long_range_cam"); + ros::init(argc, argv, "usb_camera"); // Start the ZED Nodelet nodelet::Loader nodelet; + std::cout << ros::this_node::getName() << std::endl; nodelet.load(ros::this_node::getName(), "mrover/LongRangeCamNodelet", ros::names::getRemappings(), {}); ros::spin(); @@ -61,5 +100,7 @@ int main(int argc, char** argv) { return EXIT_SUCCESS; } +#ifdef MROVER_IS_NODELET #include PLUGINLIB_EXPORT_CLASS(mrover::LongRangeCamNodelet, nodelet::Nodelet) +#endif diff --git a/src/perception/long_range_cam/long_range_cam.hpp b/src/perception/long_range_cam/long_range_cam.hpp index c8ca73ee8..2de870686 100644 --- a/src/perception/long_range_cam/long_range_cam.hpp +++ b/src/perception/long_range_cam/long_range_cam.hpp @@ -14,7 +14,7 @@ namespace mrover { ros::Publisher mCamInfoPub; ros::Publisher mImgPub; - std::thread mReadThread; + std::jthread mGrabThread; std::mutex mSwapMutex; boost::condition_variable mSwapCv; std::atomic_bool mIsSwapReady = false; @@ -30,8 +30,9 @@ namespace mrover { ~LongRangeCamNodelet() override; - void readUpdate(); 
- void imagePubUpdate(); + void grabUpdate(); }; + void fillImageMessage(cv::Mat& grey, sensor_msgs::ImagePtr const& msg); + } // namespace mrover From 22036a99835284cc4e089ad176af01a30313dea3 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Mon, 30 Oct 2023 16:27:55 -0400 Subject: [PATCH 023/126] switch image type from grey -> bgra --- .../long_range_cam/long_range_cam.cpp | 18 +++++++++--------- .../long_range_cam/long_range_cam.hpp | 2 +- .../long_range_cam/long_range_tag_detector.cpp | 2 ++ .../long_range_cam/long_range_tag_detector.hpp | 10 ++++++---- .../long_range_tag_detector.processing.cpp | 16 ++++++++++------ 5 files changed, 28 insertions(+), 20 deletions(-) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index ab8a1fdfa..8720f9657 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -25,17 +25,17 @@ namespace mrover { } } - void fillImageMessage(cv::Mat& grey, sensor_msgs::ImagePtr const& msg) { + void fillImageMessage(cv::Mat& bgra, sensor_msgs::ImagePtr const& msg) { // TODO: Uncomment this // assert(img.channels() == 4); assert(msg); - msg->height = grey.rows; - msg->width = grey.cols; - msg->encoding = sensor_msgs::image_encodings::MONO8; - msg->step = grey.step[0]; + msg->height = bgra.rows; + msg->width = bgra.cols; + msg->encoding = sensor_msgs::image_encodings::BGRA8; + msg->step = bgra.step[0]; msg->is_bigendian = __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; - auto* imgPtr = grey.ptr(0); + auto* imgPtr = bgra.ptr(0); size_t size = msg->step * msg->height; msg->data.resize(size); std::cout << "data size " << msg->data.size() << std::endl; @@ -65,9 +65,9 @@ namespace mrover { cv::imshow("Sender", frame); if (mImgPub.getNumSubscribers()) { auto imgMsg = boost::make_shared(); - cv::Mat grey; - cv::cvtColor(frame, grey, cv::COLOR_YUV2GRAY_I420); - fillImageMessage(grey, imgMsg); + cv::Mat bgra; + cv::cvtColor(frame, 
bgra, cv::COLOR_YUV2BGRA_I420); + fillImageMessage(bgra, imgMsg); imgMsg->header.frame_id = "long_range_cam_frame"; imgMsg->header.stamp = ros::Time::now(); imgMsg->header.seq = mGrabUpdateTick; diff --git a/src/perception/long_range_cam/long_range_cam.hpp b/src/perception/long_range_cam/long_range_cam.hpp index 2de870686..8c8141534 100644 --- a/src/perception/long_range_cam/long_range_cam.hpp +++ b/src/perception/long_range_cam/long_range_cam.hpp @@ -33,6 +33,6 @@ namespace mrover { void grabUpdate(); }; - void fillImageMessage(cv::Mat& grey, sensor_msgs::ImagePtr const& msg); + void fillImageMessage(cv::Mat& bgra, sensor_msgs::ImagePtr const& msg); } // namespace mrover diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index b31eb97fe..4b34c8faa 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -16,6 +16,8 @@ namespace mrover { mPnh.param("tag_decrement_weight", mTagDecrementWeight, 1); mImgPub = mNh.advertise("long_range_tag_detection", 1); + + mImgSub = mNh.subscribe("long_range_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); mServiceEnableDetections = mNh.advertiseService("enable_detections", &LongRangeTagDetectorNodelet::enableDetectionsCallback, this); // Lambda handles passing class pointer (implicit first parameter) to configCallback diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp index c7972500d..8e9160494 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp @@ -1,4 +1,5 @@ #include "pch.hpp" +#include namespace mrover { @@ -21,7 +22,8 @@ namespace mrover { //Publishing Flags bool mEnableDetections = true; 
- bool mPublishImages{}; // If set, we publish the images with the tags drawn on top + // TODO: change this to param + bool mPublishImages = true; // If set, we publish the images with the tags drawn on top //Constatns set in ROS params for num hits needed to publish int mMinHitCountBeforePublish{}; @@ -53,7 +55,7 @@ namespace mrover { std::optional mPrevDetectedCount; // Log spam prevention dynamic_reconfigure::Server mConfigServer; dynamic_reconfigure::Server::CallbackType mCallbackType; - LoopProfiler mProfiler{"Long RangeTag Detector"}; + LoopProfiler mProfiler{"Long Range Tag Detector"}; std::unordered_map mThreshPubs; // Map from threshold scale to publisher ros::ServiceServer mServiceEnableDetections; bool mUseOdom{}; @@ -66,7 +68,7 @@ namespace mrover { * 1. Updates mImg to store the underlying image matrix * @param msg image message */ - void imageCallback(sensor_msgs::Image const& msg); + void imageCallback(sensor_msgs::ImageConstPtr const& msg); /** * Stores msg to mImgMsg @@ -74,7 +76,7 @@ namespace mrover { * * @param msg Current Image Message */ - void updateImageMatrices(sensor_msgs::Image const& msg); + void updateImageMatrices(sensor_msgs::ImageConstPtr const& msg); /** * Runs the cv::aruco tag detection on the cv::mat diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index 5a6e1fb4b..582d5761a 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -1,4 +1,6 @@ #include "long_range_tag_detector.hpp" +#include +#include namespace mrover { @@ -7,7 +9,7 @@ namespace mrover { * * @param msg image message */ - void LongRangeTagDetectorNodelet::imageCallback(sensor_msgs::Image const& msg) { + void LongRangeTagDetectorNodelet::imageCallback(sensor_msgs::ImageConstPtr const& msg) { if 
(!mEnableDetections) return; //Store image contents to member variables @@ -33,14 +35,16 @@ namespace mrover { //HELPER FUNCTIONS - void LongRangeTagDetectorNodelet::updateImageMatrices(sensor_msgs::Image const& msg) { + void LongRangeTagDetectorNodelet::updateImageMatrices(sensor_msgs::ImageConstPtr const& msg) { + assert(msg); + assert(msg->height > 0); + assert(msg->width > 0); //Store image message to mImgMsg member variable - mImgMsg = msg; - - cv::Mat cvImage{static_cast(msg.height), static_cast(msg.width), CV_8UC3, const_cast(msg.data.data())}; + mImgMsg = *msg; + mImg = cv::Mat{static_cast(msg->height), static_cast(msg->width), CV_8UC4, const_cast(msg->data.data())}; //Store to mImg member variable - unsure if necessary but need it to persist - cvImage.copyTo(mImg); + // cvImage.copyTo(mImg); // TODO: Set the grayImage if neccesary } From ac14193813cad0bd71b07617d71b64524347c6b2 Mon Sep 17 00:00:00 2001 From: lorraine Date: Thu, 2 Nov 2023 19:55:54 -0400 Subject: [PATCH 024/126] implemented part of getTagBearing() --- .../long_range_cam/long_range_tag_detector.cpp | 2 +- .../long_range_cam/long_range_tag_detector.hpp | 2 +- .../long_range_tag_detector.processing.cpp | 14 ++++++++++---- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index 4b34c8faa..03252cb8b 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -8,7 +8,7 @@ namespace mrover { mDetectorParams = new cv::aruco::DetectorParameters(); auto defaultDetectorParams = cv::makePtr(); int dictionaryNumber; - + mPnh.param("publish_images", mPublishImages, true); mPnh.param("min_hit_count_before_publish", mMinHitCountBeforePublish, 5); mPnh.param("max_hit_count", mMaxHitCount, 5); diff --git 
a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp index 8e9160494..4826554a8 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp @@ -25,7 +25,7 @@ namespace mrover { // TODO: change this to param bool mPublishImages = true; // If set, we publish the images with the tags drawn on top - //Constatns set in ROS params for num hits needed to publish + //Constants set in ROS params for num hits needed to publish int mMinHitCountBeforePublish{}; int mBaseHitCount{}; int mMaxHitCount{}; diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index 582d5761a..9b9bb2ef3 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -141,7 +141,7 @@ namespace mrover { return centerPoint; } - cv::Point2f LongRangeTagDetectorNodelet::getNormedTagCenterOffset(std::vector& tagCorners) const { + cv::Poinjt2f LongRangeTagDetectorNodelet::getNormedTagCenterOffset(std::vector& tagCorners) const { cv::Point2f offsetCenterPoint = getTagCenterOffsetPixels(tagCorners); offsetCenterPoint.x /= static_cast(mImgMsg.width); @@ -151,9 +151,15 @@ namespace mrover { } float LongRangeTagDetectorNodelet::getTagBearing(std::vector& tagCorners) const { - //TODO: Implement me! 
- - return {}; + //TODO: FINISH + //for HD720 resolution + pair center = getTagCenterPixels(tagCorners); + float x = center.first; + int width = //get width of the image + int angleOfFOV = 104; + float bearing = ((x - width / 2) / width) * angleOfFOV; + + return bearing; } void LongRangeTagDetectorNodelet::publishThresholdTags() { From 28fc946260a7349417c07992438bf57f6164bdb8 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Mon, 6 Nov 2023 11:13:25 -0500 Subject: [PATCH 025/126] fixed segfaults, image wrong format --- .../long_range_tag_detector.cpp | 3 +- .../long_range_tag_detector.processing.cpp | 55 +++++++++++-------- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index 03252cb8b..b77195acb 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -8,7 +8,7 @@ namespace mrover { mDetectorParams = new cv::aruco::DetectorParameters(); auto defaultDetectorParams = cv::makePtr(); int dictionaryNumber; - + mPnh.param("publish_images", mPublishImages, true); mPnh.param("min_hit_count_before_publish", mMinHitCountBeforePublish, 5); mPnh.param("max_hit_count", mMaxHitCount, 5); @@ -18,6 +18,7 @@ namespace mrover { mImgPub = mNh.advertise("long_range_tag_detection", 1); mImgSub = mNh.subscribe("long_range_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); + mDictionary = cv::makePtr(cv::aruco::getPredefinedDictionary(dictionaryNumber)); mServiceEnableDetections = mNh.advertiseService("enable_detections", &LongRangeTagDetectorNodelet::enableDetectionsCallback, this); // Lambda handles passing class pointer (implicit first parameter) to configCallback diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp 
b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index 9b9bb2ef3..b80c02750 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -28,7 +28,7 @@ namespace mrover { } //Publish all tags that meet threshold - publishThresholdTags(); + // publishThresholdTags(); //PUBLISH TAGS } @@ -40,16 +40,16 @@ namespace mrover { assert(msg->height > 0); assert(msg->width > 0); //Store image message to mImgMsg member variable - mImgMsg = *msg; - mImg = cv::Mat{static_cast(msg->height), static_cast(msg->width), CV_8UC4, const_cast(msg->data.data())}; + cv::Mat cvImage = cv::Mat{static_cast(msg->height), static_cast(msg->width), CV_8UC3, const_cast(msg->data.data())}; //Store to mImg member variable - unsure if necessary but need it to persist - // cvImage.copyTo(mImg); + cvImage.copyTo(mImg); // TODO: Set the grayImage if neccesary } void LongRangeTagDetectorNodelet::runTagDetection() { + std::cout << mDetectorParams->adaptiveThreshConstant << std::endl; cv::aruco::detectMarkers(mImg, mDictionary, mImmediateCorners, mImmediateIds, mDetectorParams); } @@ -141,7 +141,7 @@ namespace mrover { return centerPoint; } - cv::Poinjt2f LongRangeTagDetectorNodelet::getNormedTagCenterOffset(std::vector& tagCorners) const { + cv::Point2f LongRangeTagDetectorNodelet::getNormedTagCenterOffset(std::vector& tagCorners) const { cv::Point2f offsetCenterPoint = getTagCenterOffsetPixels(tagCorners); offsetCenterPoint.x /= static_cast(mImgMsg.width); @@ -151,15 +151,16 @@ namespace mrover { } float LongRangeTagDetectorNodelet::getTagBearing(std::vector& tagCorners) const { - //TODO: FINISH - //for HD720 resolution - pair center = getTagCenterPixels(tagCorners); - float x = center.first; - int width = //get width of the image - int angleOfFOV = 104; - float bearing = ((x - width / 2) / width) * angleOfFOV; - - return bearing; + // //TODO: 
FINISH + // //for HD720 resolution + // pair center = getTagCenterPixels(tagCorners); + // float x = center.first; + // int width = //get width of the image + // int angleOfFOV = 104; + // float bearing = ((x - width / 2) / width) * angleOfFOV; + + // return bearing; + return {}; } void LongRangeTagDetectorNodelet::publishThresholdTags() { @@ -187,15 +188,23 @@ namespace mrover { } void LongRangeTagDetectorNodelet::publishTagsOnImage() { - cv::Mat markedImage; - mImg.copyTo(markedImage); - - cv::aruco::drawDetectedMarkers(markedImage, mImmediateCorners); - - sensor_msgs::Image imgMsgOut = mImgMsg; - imgMsgOut.data = markedImage; - - mImgPub.publish(imgMsgOut); + // cv::Mat markedImage; + // markedImage.copyTo(mImg); + // std::cout << markedImage.total() << ", " << markedImage.channels() << std::endl; + + cv::aruco::drawDetectedMarkers(mImg, mImmediateCorners, mImmediateIds); + mImgMsg.header.seq = mSeqNum; + mImgMsg.header.stamp = ros::Time::now(); + mImgMsg.header.frame_id = "long_range_cam_frame"; + mImgMsg.height = mImg.rows; + mImgMsg.width = mImg.cols; + mImgMsg.encoding = sensor_msgs::image_encodings::BGR8; + mImgMsg.step = mImg.step; + mImgMsg.is_bigendian = __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; + size_t size = mImgMsg.step * mImgMsg.height; + mImgMsg.data.resize(size); + std::uninitialized_copy(std::execution::par_unseq, mImg.data, mImg.data + size, mImgMsg.data.begin()); + mImgPub.publish(mImgMsg); } } // namespace mrover \ No newline at end of file From 0a4db393b61f35ecbeb92571b5e72e0d7942ef16 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Mon, 6 Nov 2023 11:30:13 -0500 Subject: [PATCH 026/126] tag detector works --- src/perception/long_range_cam/long_range_cam.cpp | 4 ++-- .../long_range_cam/long_range_tag_detector.processing.cpp | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index 8720f9657..8b7fde3eb 100644 --- 
a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -32,7 +32,7 @@ namespace mrover { msg->height = bgra.rows; msg->width = bgra.cols; - msg->encoding = sensor_msgs::image_encodings::BGRA8; + msg->encoding = sensor_msgs::image_encodings::BGR8; msg->step = bgra.step[0]; msg->is_bigendian = __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; auto* imgPtr = bgra.ptr(0); @@ -66,7 +66,7 @@ namespace mrover { if (mImgPub.getNumSubscribers()) { auto imgMsg = boost::make_shared(); cv::Mat bgra; - cv::cvtColor(frame, bgra, cv::COLOR_YUV2BGRA_I420); + cv::cvtColor(frame, bgra, cv::COLOR_YUV2BGR_I420); fillImageMessage(bgra, imgMsg); imgMsg->header.frame_id = "long_range_cam_frame"; imgMsg->header.stamp = ros::Time::now(); diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index b80c02750..fd582b3ab 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -24,7 +24,7 @@ namespace mrover { // 3. 
We only want to publish the tags if the topic has subscribers if (mPublishImages && mImgPub.getNumSubscribers()) { // Draw the tags on the image using OpenCV - publishTagsOnImage(); + // publishTagsOnImage(); } //Publish all tags that meet threshold @@ -45,6 +45,7 @@ namespace mrover { //Store to mImg member variable - unsure if necessary but need it to persist cvImage.copyTo(mImg); + publishTagsOnImage(); // TODO: Set the grayImage if neccesary } From cccf7b0249d7d36387772da3410d144185eddb7b Mon Sep 17 00:00:00 2001 From: lorraine Date: Sun, 12 Nov 2023 13:59:15 -0500 Subject: [PATCH 027/126] builds correctly now --- .../long_range_tag_detector.processing.cpp | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index fd582b3ab..f42653e6c 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -152,20 +152,17 @@ namespace mrover { } float LongRangeTagDetectorNodelet::getTagBearing(std::vector& tagCorners) const { - // //TODO: FINISH - // //for HD720 resolution - // pair center = getTagCenterPixels(tagCorners); - // float x = center.first; - // int width = //get width of the image - // int angleOfFOV = 104; - // float bearing = ((x - width / 2) / width) * angleOfFOV; - - // return bearing; - return {}; + //for HD720 resolution + cv::Point2f center = getTagCenterPixels(tagCorners); + float width = 2 * center.x; + float angleOfFOV = 104; + float bearing = ((center.x - width / 2) / width) * angleOfFOV; + + return bearing; } void LongRangeTagDetectorNodelet::publishThresholdTags() { - //Loop through all the tags + //Loop through all the tagsj LongRangeTags tagsMessage; // for (auto& tag: mTags) { From 23758747d728041b96e2865556204c0f073436e3 Mon 
Sep 17 00:00:00 2001 From: umroverPerception Date: Tue, 14 Nov 2023 18:19:56 -0500 Subject: [PATCH 028/126] add launch --- launch/long_range_cam.launch | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 launch/long_range_cam.launch diff --git a/launch/long_range_cam.launch b/launch/long_range_cam.launch new file mode 100644 index 000000000..be21fde31 --- /dev/null +++ b/launch/long_range_cam.launch @@ -0,0 +1,19 @@ + + + + + + + + + \ No newline at end of file From f996b2d1ba959187005edfb6af99d330df3950fb Mon Sep 17 00:00:00 2001 From: Emerson-Ramey Date: Sun, 19 Nov 2023 06:48:34 +0000 Subject: [PATCH 029/126] initial files --- src/navigation/approach_object.py | 36 +++++++++++++++ src/navigation/approach_post.py | 32 ++++++++++++- src/navigation/approach_target_base.py | 63 ++++++++++++++++++++++++++ src/navigation/long_range.py | 37 +++++++++++++++ 4 files changed, 167 insertions(+), 1 deletion(-) create mode 100644 src/navigation/approach_object.py create mode 100644 src/navigation/approach_target_base.py create mode 100644 src/navigation/long_range.py diff --git a/src/navigation/approach_object.py b/src/navigation/approach_object.py new file mode 100644 index 000000000..a53eaca46 --- /dev/null +++ b/src/navigation/approach_object.py @@ -0,0 +1,36 @@ +import tf2_ros + +from util.ros_utils import get_rosparam +from util.state_lib.state import State +from typing import Optional + +from navigation import state +from navigation.approach_target_base import ApproachTargetBaseState + + +class ApproachObjectState(ApproachTargetBaseState): + """ + State for when we see an object (rubber mallet or water bottle). + We are only using the ZED camera. + Transitions: + -If arrived at target: DoneState + -Did not arrive at target: ApproachObjectState + -Arrived at the waypoint where the fiducial should be but have not seen it yet: SearchState + -Stuck? 
+ """ + + def on_enter(self, context): + pass + + def on_exit(self, context): + pass + + def get_target_pos(self, context) -> Optional[int]: + object_pos = None # TODO: replace + return object_pos + + def determine_next(self, context, finished: bool) -> State: + if finished: + return state.DoneState() + else: + return self diff --git a/src/navigation/approach_post.py b/src/navigation/approach_post.py index b3ebfb676..a4c8d45c7 100644 --- a/src/navigation/approach_post.py +++ b/src/navigation/approach_post.py @@ -2,8 +2,38 @@ from util.ros_utils import get_rosparam from util.state_lib.state import State +from typing import Optional -from navigation import search, waypoint +from navigation import search, state, waypoint +from navigation.approach_target_base import ApproachTargetBaseState + + +class NewApproachPostState(ApproachTargetBaseState): + """ + State for when the tag is seen in the ZED camera. + Transitions: + -If arrived at target: DoneState + -Did not arrive at target: ApproachPostState + -Arrived at the waypoint where the fiducial should be but have not seen it yet: SearchState + -Stuck? 
+ """ + + def on_enter(self, context): + pass + + def on_exit(self, context): + pass + + def get_target_pos(self, context) -> Optional[int]: + # return fid_pos, either position or None + fid_pos = context.env.current_fid_pos() + return fid_pos + + def determine_next(self, context, finished: bool) -> State: + if finished: + return state.DoneState() + else: + return self class ApproachPostState(State): diff --git a/src/navigation/approach_target_base.py b/src/navigation/approach_target_base.py new file mode 100644 index 000000000..ab28f4b76 --- /dev/null +++ b/src/navigation/approach_target_base.py @@ -0,0 +1,63 @@ +import tf2_ros + +from util.ros_utils import get_rosparam +from util.state_lib.state import State +from abc import abstractmethod +from typing import Optional + +from navigation import search + + +class ApproachTargetBaseState(State): + STOP_THRESH = get_rosparam("single_fiducial/stop_thresh", 0.7) + FIDUCIAL_STOP_THRESHOLD = get_rosparam("single_fiducial/fiducial_stop_threshold", 1.75) + DRIVE_FWD_THRESH = get_rosparam("waypoint/drive_fwd_thresh", 0.34) # 20 degrees + + def on_enter(self, context) -> None: + pass + + def on_exit(self, context) -> None: + pass + + @abstractmethod + def get_target_pos(self, context) -> Optional[int]: + raise NotImplementedError + + @abstractmethod + def determine_next(self, context, finished: bool) -> State: + raise NotImplementedError + + def on_loop(self, context) -> State: + """ + Drive towards a target based on what gets returned from get_target_pos(). + Return to search if there is no target position. 
+ :param context: rover context + :return: Next state + """ + target_pos = self.get_target_pos(context) + if target_pos is None: + return search.SearchState() + + try: + cmd_vel, arrived = context.rover.driver.get_drive_command( + target_pos, + context.rover.get_pose(in_odom_frame=True), + self.STOP_THRESH, + self.DRIVE_FWD_THRESH, + in_odom=context.use_odom, + ) + next_state = self.determine_next(arrived) + if arrived: + context.env.arrived_at_post = True # TODO: change post to target? + context.env.last_post_location = self.get_target_pos() # TODO: change post to target? + context.course.increment_waypoint() + else: + context.rover.send_drive_command(cmd_vel) + except ( + tf2_ros.LookupException, + tf2_ros.ConnectivityException, + tf2_ros.ExtrapolationException, + ): + pass + + return next_state diff --git a/src/navigation/long_range.py b/src/navigation/long_range.py new file mode 100644 index 000000000..fe8641d2e --- /dev/null +++ b/src/navigation/long_range.py @@ -0,0 +1,37 @@ +import tf2_ros + +from util.ros_utils import get_rosparam +from util.state_lib.state import State +from abc import abstractmethod +from typing import Optional + +from navigation import approach_post +from navigation.approach_target_base import ApproachTargetBaseState + + +class LongRangeState(ApproachTargetBaseState): + """ + State for when the tag is seen only in the long range camera. + Transitions: + -If tag seen in ZED: ApproachPostState + -Not seen in ZED and moved: LongRangeState + -Don't see the tag in long range camera: SearchState + -Stuck? 
+ """ + + def on_enter(self, context): + pass + + def on_exit(self, context): + pass + + def get_target_pos(self, context) -> Optional[int]: + target_pos = None # TODO: replace + return target_pos + + def determine_next(self, context, finished: bool) -> State: + fid_pos = context.env.current_fid_pos() + if fid_pos is None: + return self + else: + return approach_post.ApproachPostState() From e7f7e9e268992529149da7cd1ef5e211d3cdbdd3 Mon Sep 17 00:00:00 2001 From: emmalinhart Date: Thu, 30 Nov 2023 20:03:18 -0500 Subject: [PATCH 030/126] camera shouldn't crash --- .../long_range_tag_detector.processing.cpp | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index f42653e6c..117364047 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -24,11 +24,11 @@ namespace mrover { // 3. 
We only want to publish the tags if the topic has subscribers if (mPublishImages && mImgPub.getNumSubscribers()) { // Draw the tags on the image using OpenCV - // publishTagsOnImage(); + //publishTagsOnImage(); } //Publish all tags that meet threshold - // publishThresholdTags(); + //publishThresholdTags(); //PUBLISH TAGS } @@ -57,25 +57,29 @@ namespace mrover { void LongRangeTagDetectorNodelet::updateHitCounts() { //loop through all identified IDs - for (size_t i = 0; i < mImmediateIds.size(); i++) + for (size_t i = 0; i < mImmediateIds.size(); i++) { updateNewlyIdentifiedTags(i); + } //Now decrement all the hitcounts for tags that were not updated // Set updated status to false - - for (auto& mTag: mTags) { - LongRangeTagStruct& currentTag = mTag.second; - + for (auto it = mTags.begin(); it != mTags.end();) { + LongRangeTagStruct& currentTag = it->second; + std::cout << mTags.size() << "!!!" << std::endl; if (currentTag.updated) { currentTag.updated = false; + it++; } else { //Decrement weight of undetected tags currentTag.hitCount -= mTagDecrementWeight; //if the value has fallen belown the minimum, remove it if (currentTag.hitCount <= mTagRemoveWeight) { - mTags.erase(mTag.first); + std::cout << "erasing" << std::endl; + it = mTags.erase(it); + } else { + it++; } } } @@ -108,7 +112,6 @@ namespace mrover { //Key exist sin mTags lrt.hitCount = std::min(mTags[currentId].hitCount + mTagIncrementWeight, mMaxHitCount); } - mTags[currentId] = lrt; } From 4f17971f5f444c887b717e42cd466a42b11cf8d6 Mon Sep 17 00:00:00 2001 From: emmalinhart Date: Sun, 3 Dec 2023 08:55:17 -0500 Subject: [PATCH 031/126] updated tag bearing function --- .../long_range_tag_detector.processing.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index 117364047..30f36cecb 100644 --- 
a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -59,14 +59,13 @@ namespace mrover { //loop through all identified IDs for (size_t i = 0; i < mImmediateIds.size(); i++) { updateNewlyIdentifiedTags(i); + std::cout << "bearing: " << getTagBearing(mImmediateCorners[i]) << "!!!" << std::endl; } - //Now decrement all the hitcounts for tags that were not updated // Set updated status to false for (auto it = mTags.begin(); it != mTags.end();) { LongRangeTagStruct& currentTag = it->second; - std::cout << mTags.size() << "!!!" << std::endl; if (currentTag.updated) { currentTag.updated = false; it++; @@ -76,7 +75,6 @@ namespace mrover { //if the value has fallen belown the minimum, remove it if (currentTag.hitCount <= mTagRemoveWeight) { - std::cout << "erasing" << std::endl; it = mTags.erase(it); } else { it++; @@ -157,9 +155,10 @@ namespace mrover { float LongRangeTagDetectorNodelet::getTagBearing(std::vector& tagCorners) const { //for HD720 resolution cv::Point2f center = getTagCenterPixels(tagCorners); - float width = 2 * center.x; - float angleOfFOV = 104; - float bearing = ((center.x - width / 2) / width) * angleOfFOV; + auto imageWidth = (float) mImgMsg.width; + std::cout << "width: " << imageWidth << std::endl; + float angleOfFOV = 101; + float bearing = ((center.x - (imageWidth / 2)) / imageWidth) * angleOfFOV; return bearing; } From 52d57636b729c9a0202aa14439b73cbcee65a8ca Mon Sep 17 00:00:00 2001 From: Emerson-Ramey Date: Tue, 9 Jan 2024 06:34:42 +0000 Subject: [PATCH 032/126] outline changes for new states --- src/navigation/approach_object.py | 2 +- src/navigation/approach_post.py | 8 ++++---- src/navigation/context.py | 32 +++++++++++++++++++++---------- src/navigation/long_range.py | 9 +++++++-- src/navigation/search.py | 14 ++++++++++++-- src/navigation/waypoint.py | 12 ++++++++++-- 6 files changed, 56 insertions(+), 21 deletions(-) 
diff --git a/src/navigation/approach_object.py b/src/navigation/approach_object.py index a53eaca46..400d627bc 100644 --- a/src/navigation/approach_object.py +++ b/src/navigation/approach_object.py @@ -26,7 +26,7 @@ def on_exit(self, context): pass def get_target_pos(self, context) -> Optional[int]: - object_pos = None # TODO: replace + object_pos = context.env.current_target_pos() return object_pos def determine_next(self, context, finished: bool) -> State: diff --git a/src/navigation/approach_post.py b/src/navigation/approach_post.py index a4c8d45c7..e4aa1ec6b 100644 --- a/src/navigation/approach_post.py +++ b/src/navigation/approach_post.py @@ -26,7 +26,7 @@ def on_exit(self, context): def get_target_pos(self, context) -> Optional[int]: # return fid_pos, either position or None - fid_pos = context.env.current_fid_pos() + fid_pos = context.env.current_target_pos() return fid_pos def determine_next(self, context, finished: bool) -> State: @@ -54,7 +54,7 @@ def on_loop(self, context) -> State: :param ud: State machine user data :return: Next state """ - fid_pos = context.env.current_fid_pos() + fid_pos = context.env.current_target_pos() if fid_pos is None: return search.SearchState() @@ -68,9 +68,9 @@ def on_loop(self, context) -> State: ) if arrived: context.env.arrived_at_post = True - context.env.last_post_location = context.env.current_fid_pos(odom_override=False) + context.env.last_post_location = context.env.current_target_pos(odom_override=False) context.course.increment_waypoint() - return waypoint.WaypointState() + return waypoint.WaypointState() # we want to go to done state now context.rover.send_drive_command(cmd_vel) except ( tf2_ros.LookupException, diff --git a/src/navigation/context.py b/src/navigation/context.py index 24468dd70..abcab5884 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -64,8 +64,9 @@ class Environment: NO_FIDUCIAL: ClassVar[int] = -1 arrived_at_post: bool = False last_post_location: Optional[np.ndarray] = 
None - - def get_fid_pos(self, fid_id: int, in_odom_frame: bool = True) -> Optional[np.ndarray]: + # TODO add dictionary for long range tag id : (time received, our hit counter, bearing) + + def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.ndarray]: """ Retrieves the pose of the given fiducial ID from the TF tree in the odom frame if in_odom_frame is True otherwise in the world frame @@ -73,12 +74,12 @@ def get_fid_pos(self, fid_id: int, in_odom_frame: bool = True) -> Optional[np.nd """ try: parent_frame = self.ctx.odom_frame if in_odom_frame else self.ctx.world_frame - fid_pose, time = SE3.from_tf_time( - self.ctx.tf_buffer, parent_frame=parent_frame, child_frame=f"fiducial{fid_id}" + target_pose, time = SE3.from_tf_time( + self.ctx.tf_buffer, parent_frame=parent_frame, child_frame=f"{id}" ) now = rospy.Time.now() if now.to_sec() - time.to_sec() >= TAG_EXPIRATION_TIME_SECONDS: - print(f"TAG EXPIRED {fid_id}!") + print(f"EXPIRED {id}!") return None except ( tf2_ros.LookupException, @@ -86,11 +87,11 @@ def get_fid_pos(self, fid_id: int, in_odom_frame: bool = True) -> Optional[np.nd tf2_ros.ExtrapolationException, ) as e: return None - return fid_pose.position - - def current_fid_pos(self, odom_override: bool = True) -> Optional[np.ndarray]: + return target_pose.position + + def current_target_pos(self, odom_override: bool = True) -> Optional[np.ndarray]: """ - Retrieves the position of the current fiducial (and we are looking for it) + Retrieves the position of the current fiducial or object (and we are looking for it) :param: odom_override if false will force it to be in the map frame """ assert self.ctx.course @@ -100,7 +101,15 @@ def current_fid_pos(self, odom_override: bool = True) -> Optional[np.ndarray]: print("CURRENT WAYPOINT IS NONE") return None - return self.get_fid_pos(current_waypoint.tag_id, in_odom) + if current_waypoint.type == WaypointType.POST: + return self.get_target_pos(f"fiducial{current_waypoint.tag_id}", in_odom) + 
elif current_waypoint.type == WaypointType.MALLET: + return self.get_target_pos("mallet", in_odom) + elif current_waypoint.type == WaypointType.WATER_BOTTLE: + return self.get_target_pos("water_bottle", in_odom) + else: + print("CURRENT WAYPOINT IS NOT A POST OR OBJECT") + return None @dataclass @@ -202,6 +211,7 @@ class Context: vis_publisher: rospy.Publisher course_listener: rospy.Subscriber stuck_listener: rospy.Subscriber + # TODO create listener for long range cam # Use these as the primary interfaces in states course: Optional[Course] @@ -242,3 +252,5 @@ def recv_enable_auton(self, req: mrover.srv.PublishEnableAutonRequest) -> mrover def stuck_callback(self, msg: Bool): self.rover.stuck = msg.data + + # TODO create callback for long range listener diff --git a/src/navigation/long_range.py b/src/navigation/long_range.py index fe8641d2e..76c0d4949 100644 --- a/src/navigation/long_range.py +++ b/src/navigation/long_range.py @@ -26,12 +26,17 @@ def on_exit(self, context): pass def get_target_pos(self, context) -> Optional[int]: - target_pos = None # TODO: replace + target_pos = None # TODO: get bearing from most recent message for current tag_id and create a “target” location to drive to by setting a point a certain look ahead distance far away from the rover (20-30 meters) once aligned with the bearing return target_pos def determine_next(self, context, finished: bool) -> State: - fid_pos = context.env.current_fid_pos() + # if we see the tag in the ZED transition to approach post + fid_pos = context.env.current_target_pos() if fid_pos is None: + # if we haven’t seen the tag in the long range cam in the last 3 seconds, go back to search + # TODO if time.now - last message received for tag_id > 3: + # return search.SearchState() + # otherwise continue in long range state return self else: return approach_post.ApproachPostState() diff --git a/src/navigation/search.py b/src/navigation/search.py index 2b0d8ee3b..76aec55e4 100644 --- a/src/navigation/search.py +++ 
b/src/navigation/search.py @@ -94,6 +94,7 @@ class SearchState(State): def on_enter(self, context) -> None: search_center = context.course.current_waypoint() if not self.is_recovering: + # TODO give different parameters to spiral_traj based on if search_center.type is POST or RUBBER_MALLET (water bottle has diff search state) self.traj = SearchTrajectory.spiral_traj( context.rover.get_pose().position[0:2], self.SPIRAL_COVERAGE_RADIUS, @@ -134,6 +135,15 @@ def on_loop(self, context) -> State: ) context.rover.send_drive_command(cmd_vel) - if context.env.current_fid_pos() is not None and context.course.look_for_post(): - return approach_post.ApproachPostState() + # TODO get current waypoint + # if current waypoint type is POST + if context.env.current_target_pos() is not None and context.course.look_for_post(): # indent this if statement + return approach_post.ApproachPostState() # if we see the tag in the ZED, go to ApproachPostState + # if we see the tag in the long range camera, go to LongRangeState + # if tag id has hit count > 3: + # return long_range.LongRangeState() + + # elif current waypoint type is RUBBER MALLET + # if context.env.current_target_pos() is not None and context.course.look_for_object(): + # return approach_object.ApproachObjectState() # if we see the object return self diff --git a/src/navigation/waypoint.py b/src/navigation/waypoint.py index 0a964dc48..8aa3398c2 100644 --- a/src/navigation/waypoint.py +++ b/src/navigation/waypoint.py @@ -35,8 +35,14 @@ def on_loop(self, context) -> State: return post_backup.PostBackupState() if context.course.look_for_post(): - if context.env.current_fid_pos() is not None: + if context.env.current_target_pos() is not None: return approach_post.ApproachPostState() + # TODO: + # elif tag id has hit count > 3: + # return long_range.LongRangeState() + # elif context.course.look_for_object(): + # if context.env.current_target_pos() is not None: + # return approach_object.ApproachObjectState() # Attempt to find the 
waypoint in the TF tree and drive to it try: @@ -51,9 +57,11 @@ def on_loop(self, context) -> State: if not context.course.look_for_post(): # We finished a regular waypoint, go onto the next one context.course.increment_waypoint() - else: + else: # TODO: elif looking for post or rubber mallet # We finished a waypoint associated with a tag id, but we have not seen it yet. return search.SearchState() + # TODO elif looking for water bottle: + # return obstacle_avoidance.ObstacleAvoidanceSearchState() if context.rover.stuck: context.rover.previous_state = self From fa6a6f0f2e0112cf1eedb9e09fbe9fc40466c1aa Mon Sep 17 00:00:00 2001 From: Marvin Jirapongsuwan Date: Thu, 18 Jan 2024 15:00:16 -0500 Subject: [PATCH 033/126] wrote mLongRangeTagsPub --- .../long_range_cam/long_range_tag_detector.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index b77195acb..777b3ba92 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -1,4 +1,6 @@ #include "long_range_tag_detector.hpp" +#include "mrover/LongRangeTags.h" +#include namespace mrover { @@ -16,8 +18,11 @@ namespace mrover { mPnh.param("tag_decrement_weight", mTagDecrementWeight, 1); mImgPub = mNh.advertise("long_range_tag_detection", 1); + mLongRangeTagsPub = mNh.advertise("tags", 1); - mImgSub = mNh.subscribe("long_range_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); + // topic: long_range_cam/image + //todo + mImgSub = mNh.subscribe(, 1, &LongRangeTagDetectorNodelet::imageCallback, this); mDictionary = cv::makePtr(cv::aruco::getPredefinedDictionary(dictionaryNumber)); mServiceEnableDetections = mNh.advertiseService("enable_detections", &LongRangeTagDetectorNodelet::enableDetectionsCallback, this); From 
704e3145ee7fde6c26f18d5b44d81ab48df9234e Mon Sep 17 00:00:00 2001 From: Emerson-Ramey Date: Sun, 21 Jan 2024 01:18:49 +0000 Subject: [PATCH 034/126] update todos --- msg/WaypointType.msg | 2 + src/navigation/approach_object.py | 2 - src/navigation/approach_post.py | 84 +++++++++++++++---------------- src/navigation/context.py | 6 ++- src/navigation/waypoint.py | 2 +- 5 files changed, 48 insertions(+), 48 deletions(-) diff --git a/msg/WaypointType.msg b/msg/WaypointType.msg index df2088fbd..a7995aec5 100644 --- a/msg/WaypointType.msg +++ b/msg/WaypointType.msg @@ -1,3 +1,5 @@ int32 NO_SEARCH = 0 int32 POST = 1 +int32 MALLET = 2 +int32 WATER_BOTTLE = 3 int32 val \ No newline at end of file diff --git a/src/navigation/approach_object.py b/src/navigation/approach_object.py index 400d627bc..c4e2c089e 100644 --- a/src/navigation/approach_object.py +++ b/src/navigation/approach_object.py @@ -15,8 +15,6 @@ class ApproachObjectState(ApproachTargetBaseState): Transitions: -If arrived at target: DoneState -Did not arrive at target: ApproachObjectState - -Arrived at the waypoint where the fiducial should be but have not seen it yet: SearchState - -Stuck? """ def on_enter(self, context): diff --git a/src/navigation/approach_post.py b/src/navigation/approach_post.py index e4aa1ec6b..5eaae0bc8 100644 --- a/src/navigation/approach_post.py +++ b/src/navigation/approach_post.py @@ -8,14 +8,12 @@ from navigation.approach_target_base import ApproachTargetBaseState -class NewApproachPostState(ApproachTargetBaseState): +class ApproachPostState(ApproachTargetBaseState): """ State for when the tag is seen in the ZED camera. Transitions: -If arrived at target: DoneState -Did not arrive at target: ApproachPostState - -Arrived at the waypoint where the fiducial should be but have not seen it yet: SearchState - -Stuck? 
""" def on_enter(self, context): @@ -36,48 +34,48 @@ def determine_next(self, context, finished: bool) -> State: return self -class ApproachPostState(State): - STOP_THRESH = get_rosparam("single_fiducial/stop_thresh", 0.7) - FIDUCIAL_STOP_THRESHOLD = get_rosparam("single_fiducial/fiducial_stop_threshold", 1.75) - DRIVE_FWD_THRESH = get_rosparam("waypoint/drive_fwd_thresh", 0.34) # 20 degrees +# class ApproachPostState(State): +# STOP_THRESH = get_rosparam("single_fiducial/stop_thresh", 0.7) +# FIDUCIAL_STOP_THRESHOLD = get_rosparam("single_fiducial/fiducial_stop_threshold", 1.75) +# DRIVE_FWD_THRESH = get_rosparam("waypoint/drive_fwd_thresh", 0.34) # 20 degrees - def on_enter(self, context) -> None: - pass +# def on_enter(self, context) -> None: +# pass - def on_exit(self, context) -> None: - pass +# def on_exit(self, context) -> None: +# pass - def on_loop(self, context) -> State: - """ - Drive towards a single fiducial if we see it and stop within a certain threshold if we see it. - Else conduct a search to find it. - :param ud: State machine user data - :return: Next state - """ - fid_pos = context.env.current_target_pos() - if fid_pos is None: - return search.SearchState() +# def on_loop(self, context) -> State: +# """ +# Drive towards a single fiducial if we see it and stop within a certain threshold if we see it. +# Else conduct a search to find it. 
+# :param ud: State machine user data +# :return: Next state +# """ +# fid_pos = context.env.current_target_pos() +# if fid_pos is None: +# return search.SearchState() - try: - cmd_vel, arrived = context.rover.driver.get_drive_command( - fid_pos, - context.rover.get_pose(in_odom_frame=True), - self.STOP_THRESH, - self.DRIVE_FWD_THRESH, - in_odom=context.use_odom, - ) - if arrived: - context.env.arrived_at_post = True - context.env.last_post_location = context.env.current_target_pos(odom_override=False) - context.course.increment_waypoint() - return waypoint.WaypointState() # we want to go to done state now - context.rover.send_drive_command(cmd_vel) - except ( - tf2_ros.LookupException, - tf2_ros.ConnectivityException, - tf2_ros.ExtrapolationException, - ): - # TODO: probably go into some waiting state - pass +# try: +# cmd_vel, arrived = context.rover.driver.get_drive_command( +# fid_pos, +# context.rover.get_pose(in_odom_frame=True), +# self.STOP_THRESH, +# self.DRIVE_FWD_THRESH, +# in_odom=context.use_odom, +# ) +# if arrived: +# context.env.arrived_at_post = True +# context.env.last_post_location = context.env.current_target_pos(odom_override=False) +# context.course.increment_waypoint() +# return waypoint.WaypointState() # we want to go to done state now +# context.rover.send_drive_command(cmd_vel) +# except ( +# tf2_ros.LookupException, +# tf2_ros.ConnectivityException, +# tf2_ros.ExtrapolationException, +# ): +# # TODO: probably go into some waiting state (old todo) +# pass - return self +# return self diff --git a/src/navigation/context.py b/src/navigation/context.py index abcab5884..723cd5842 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -62,8 +62,8 @@ class Environment: ctx: Context NO_FIDUCIAL: ClassVar[int] = -1 - arrived_at_post: bool = False - last_post_location: Optional[np.ndarray] = None + arrived_at_post: bool = False # TODO change to arrived_at_target + last_post_location: Optional[np.ndarray] = None # TODO change to 
last_target_location # TODO add dictionary for long range tag id : (time received, our hit counter, bearing) def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.ndarray]: @@ -156,6 +156,8 @@ def look_for_post(self) -> bool: return waypoint.type.val == mrover.msg.WaypointType.POST else: return False + + # TODO add look_for_object() def is_complete(self) -> bool: return self.waypoint_index == len(self.course_data.waypoints) diff --git a/src/navigation/waypoint.py b/src/navigation/waypoint.py index 8aa3398c2..d07780563 100644 --- a/src/navigation/waypoint.py +++ b/src/navigation/waypoint.py @@ -61,7 +61,7 @@ def on_loop(self, context) -> State: # We finished a waypoint associated with a tag id, but we have not seen it yet. return search.SearchState() # TODO elif looking for water bottle: - # return obstacle_avoidance.ObstacleAvoidanceSearchState() + # return water_bottle_search.WaterBottleSearchState() if context.rover.stuck: context.rover.previous_state = self From 6bfcae6a024f9d4328b59732bd4138a90ca9be76 Mon Sep 17 00:00:00 2001 From: Stephen Barstys Date: Sun, 21 Jan 2024 14:21:00 -0500 Subject: [PATCH 035/126] progress on TODOs --- src/navigation/approach_target_base.py | 4 +- src/navigation/context.py | 41 +++++++++++++----- src/navigation/long_range_state.py | 60 ++++++++++++++++++++++++++ src/navigation/search.py | 38 ++++++++++------ 4 files changed, 117 insertions(+), 26 deletions(-) create mode 100644 src/navigation/long_range_state.py diff --git a/src/navigation/approach_target_base.py b/src/navigation/approach_target_base.py index ab28f4b76..f0dbb4650 100644 --- a/src/navigation/approach_target_base.py +++ b/src/navigation/approach_target_base.py @@ -48,8 +48,8 @@ def on_loop(self, context) -> State: ) next_state = self.determine_next(arrived) if arrived: - context.env.arrived_at_post = True # TODO: change post to target? - context.env.last_post_location = self.get_target_pos() # TODO: change post to target? 
+ context.env.arrived_at_target = True + context.env.last_target_location = self.get_target_pos() context.course.increment_waypoint() else: context.rover.send_drive_command(cmd_vel) diff --git a/src/navigation/context.py b/src/navigation/context.py index 28250a3bb..8c7c75597 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -62,10 +62,11 @@ class Environment: ctx: Context NO_FIDUCIAL: ClassVar[int] = -1 - arrived_at_post: bool = False # TODO change to arrived_at_target - last_post_location: Optional[np.ndarray] = None # TODO change to last_target_location + arrived_at_target: bool = None + last_target_location: Optional[np.ndarray] = None # TODO add dictionary for long range tag id : (time received, our hit counter, bearing) - + tag_data_dict = {} + def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.ndarray]: """ Retrieves the pose of the given fiducial ID from the TF tree in the odom frame @@ -74,9 +75,7 @@ def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.nda """ try: parent_frame = self.ctx.odom_frame if in_odom_frame else self.ctx.world_frame - target_pose, time = SE3.from_tf_time( - self.ctx.tf_buffer, parent_frame=parent_frame, child_frame=f"{id}" - ) + target_pose, time = SE3.from_tf_time(self.ctx.tf_buffer, parent_frame=parent_frame, child_frame=f"{id}") now = rospy.Time.now() if now.to_sec() - time.to_sec() >= TAG_EXPIRATION_TIME_SECONDS: print(f"EXPIRED {id}!") @@ -88,7 +87,7 @@ def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.nda ) as e: return None return target_pose.position - + def current_target_pos(self, odom_override: bool = True) -> Optional[np.ndarray]: """ Retrieves the position of the current fiducial or object (and we are looking for it) @@ -156,8 +155,20 @@ def look_for_post(self) -> bool: return waypoint.type.val == mrover.msg.WaypointType.POST else: return False - - # TODO add look_for_object() + + def look_for_object(self) -> bool: + 
""" + Returns whether the currently active waypoint (if it exists) indicates + that we should go to either the mallet or the water bottle. + """ + waypoint = self.current_waypoint() + if waypoint is not None: + return ( + waypoint.type.val == mrover.msg.WaypointType.MALLET + or waypoint.type.val == mrover.msg.WaypointType.WATER_BOTTLE + ) + else: + return False def is_complete(self) -> bool: return self.waypoint_index == len(self.course_data.waypoints) @@ -213,7 +224,7 @@ class Context: vis_publisher: rospy.Publisher course_listener: rospy.Subscriber stuck_listener: rospy.Subscriber - # TODO create listener for long range cam + long_range_cam_listener: rospy.Subscriber # Use these as the primary interfaces in states course: Optional[Course] @@ -243,6 +254,8 @@ def __init__(self): self.world_frame = rospy.get_param("world_frame") self.odom_frame = rospy.get_param("odom_frame") self.rover_frame = rospy.get_param("rover_frame") + self.tag_data_listener = rospy.Subscriber("tag_data", list, self.tag_data_callback) + self.long_range_cam_listener = rospy.Subscriber("long_range_cam", Bool, self.long_range_cam_listener) def recv_enable_auton(self, req: mrover.srv.PublishEnableAutonRequest) -> mrover.srv.PublishEnableAutonResponse: enable_msg = req.enableMsg @@ -255,4 +268,10 @@ def recv_enable_auton(self, req: mrover.srv.PublishEnableAutonRequest) -> mrover def stuck_callback(self, msg: Bool): self.rover.stuck = msg.data - # TODO create callback for long range listener + def tag_data_callback(self, tags: list) -> None: + for tag in tags: + tag_id = tag["id"] + hit_count = tag["hitCount"] + bearing = tag["bearing"] + time_received = rospy.Time() + self.env.tag_data_dict[tag_id] = (time_received, hit_count, bearing) diff --git a/src/navigation/long_range_state.py b/src/navigation/long_range_state.py new file mode 100644 index 000000000..13df495af --- /dev/null +++ b/src/navigation/long_range_state.py @@ -0,0 +1,60 @@ +import tf2_ros +import rospy +from aenum import Enum, 
NoAlias +from geometry_msgs.msg import Twist +from util.ros_utils import get_rosparam +from context import Context +from util.state_lib.state import State +from util.state_lib.state_machine import StateMachine +from abc import ABC, abstractmethod +from state import DoneState +from search import SearchState +from util.state_lib.state_publisher_server import StatePublisher +from typing import Optional +from util.state_lib.state import State +from abc import abstractmethod +import state +from state import ApproachTargetBaseState +from state import approach_post +import numpy as np + +TAG_SEARCH_EXPIRATION = 3 + + +class LongRangeState(ApproachTargetBaseState): + """ + State for when the tag is seen only in the long range camera. + Transitions: + -If tag seen in ZED: ApproachPostState + -Not seen in ZED and moved: LongRangeState + -Don't see the tag in long range camera: SearchState + -Stuck? + """ + + def on_enter(self, context): + pass + + def on_exit(self, context): + pass + + def get_target_pos(self, context) -> Optional[int]: + tag_id = context.current_waypoint()["tagid"] + bearing = context.env.tag_data_dict[tag_id][2] + rover_position = context.rover.get_pose().position + target_pos_x = 90 - bearing + target_pos_y = bearing + target_pos_z = rover_position[2] + target_coordinates = np.ndarray + context.tag_data_callback() + return target_pos + + def determine_next(self, context, finished: bool) -> State: + fid_pos = context.env.current_fid_pos() # Need to pass in the fid_id + tag_time = context.env.tag_data_dict[context.current_tag_id][0] + if fid_pos is None: + if rospy.Time() - tag_time > TAG_SEARCH_EXPIRATION: + return state.SearchState() + else: + return state.LongRangeState() + else: + return approach_post.ApproachPostState() diff --git a/src/navigation/search.py b/src/navigation/search.py index 76aec55e4..5e31537aa 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -5,6 +5,7 @@ import numpy as np from mrover.msg import GPSPointList 
+import mrover.msg from util.ros_utils import get_rosparam from util.state_lib.state import State @@ -93,15 +94,26 @@ class SearchState(State): def on_enter(self, context) -> None: search_center = context.course.current_waypoint() + if not self.is_recovering: # TODO give different parameters to spiral_traj based on if search_center.type is POST or RUBBER_MALLET (water bottle has diff search state) - self.traj = SearchTrajectory.spiral_traj( - context.rover.get_pose().position[0:2], - self.SPIRAL_COVERAGE_RADIUS, - self.DISTANCE_BETWEEN_SPIRALS, - self.SEGMENTS_PER_ROTATION, - search_center.tag_id, - ) + if search_center.type == mrover.msg.WaypointType.POST: + self.traj = SearchTrajectory.spiral_traj( + context.rover.get_pose().position[0:2], + self.SPIRAL_COVERAGE_RADIUS, + self.DISTANCE_BETWEEN_SPIRALS, + self.SEGMENTS_PER_ROTATION, + search_center.tag_id, + ) + + if mrover.msg.WaypointType.RUBBER_MALLET: + self.traj = SearchTrajectory.spiral_traj( + context.rover.get_pose().position[0:2], + self.SPIRAL_COVERAGE_RADIUS / 2, + self.DISTANCE_BETWEEN_SPIRALS / 2, + self.SEGMENTS_PER_ROTATION, + search_center.tag_id, + ) self.prev_target = None def on_exit(self, context) -> None: @@ -137,13 +149,13 @@ def on_loop(self, context) -> State: # TODO get current waypoint # if current waypoint type is POST - if context.env.current_target_pos() is not None and context.course.look_for_post(): # indent this if statement - return approach_post.ApproachPostState() # if we see the tag in the ZED, go to ApproachPostState - # if we see the tag in the long range camera, go to LongRangeState + if context.env.current_target_pos() is not None and context.course.look_for_post(): # indent this if statement + return approach_post.ApproachPostState() # if we see the tag in the ZED, go to ApproachPostState + # if we see the tag in the long range camera, go to LongRangeState # if tag id has hit count > 3: # return long_range.LongRangeState() - + # elif current waypoint type is RUBBER MALLET - 
# if context.env.current_target_pos() is not None and context.course.look_for_object(): - # return approach_object.ApproachObjectState() # if we see the object + # if context.env.current_target_pos() is not None and context.course.look_for_object(): + # return approach_object.ApproachObjectState() # if we see the object return self From f51c02d567a24e47dd605acadbb39600e8c5867e Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Mon, 22 Jan 2024 18:30:18 -0500 Subject: [PATCH 036/126] add udev rule for usb cam --- src/perception/long_range_cam/long_range_cam.cpp | 11 +++-------- .../long_range_cam/long_range_tag_detector.cpp | 2 +- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index 8b7fde3eb..16388d0e3 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -10,12 +10,9 @@ namespace mrover { using namespace std::chrono_literals; - - // gst-launch-1.0 -e -v v4l2src device=/dev/video4 ! videoconvert ! videoscale ! videorate ! "video/x-raw,format=I420,width=640,height=480,framerate=30/1" ! x264enc key-int-max=15 ! rtph264pay ! udpsink host=localhost port=5000 - // gst-launch-1.0 -v udpsrc uri=udp://localhost:5000 ! application/x-rtp,media=video,clock-rate=90000,encoding-name=H264,payload=96 ! rtph264depay ! avdec_h264 ! 
autovideosink /** - * @brief Load config, open the camera, and start our threads - */ + * @brief Load config, open the camera, and start our threads + */ void LongRangeCamNodelet::onInit() { try { mGrabThread = std::jthread(&LongRangeCamNodelet::grabUpdate, this); @@ -26,8 +23,6 @@ namespace mrover { } void fillImageMessage(cv::Mat& bgra, sensor_msgs::ImagePtr const& msg) { - // TODO: Uncomment this - // assert(img.channels() == 4); assert(msg); msg->height = bgra.rows; @@ -52,7 +47,7 @@ namespace mrover { mImgPub = mNh.advertise("long_range_cam/image", 1); // While dtor not called while (ros::ok()) { - cv::VideoCapture mCapture{std::format("v4l2src device=/dev/video4 ! videoconvert ! video/x-raw,width={},height={},format=I420,framerate={}/1 ! appsink", 1920, 1080, 5), cv::CAP_GSTREAMER}; + cv::VideoCapture mCapture{std::format("v4l2src device=/dev/arducam ! videoconvert ! video/x-raw,width={},height={},format=I420,framerate={}/1 ! appsink", 1920, 1080, 5), cv::CAP_GSTREAMER}; if (!mCapture.isOpened()) { throw std::runtime_error("Long range cam failed to open"); } diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index 777b3ba92..29688efaa 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -22,7 +22,7 @@ namespace mrover { // topic: long_range_cam/image //todo - mImgSub = mNh.subscribe(, 1, &LongRangeTagDetectorNodelet::imageCallback, this); + mImgSub = mNh.subscribe("long_range_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); mDictionary = cv::makePtr(cv::aruco::getPredefinedDictionary(dictionaryNumber)); mServiceEnableDetections = mNh.advertiseService("enable_detections", &LongRangeTagDetectorNodelet::enableDetectionsCallback, this); From d6abfe9acdff7a51620c6015e57f34fcb7629d19 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Mon, 
22 Jan 2024 21:08:51 -0500 Subject: [PATCH 037/126] Update name --- src/navigation/context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/navigation/context.py b/src/navigation/context.py index c0d72c76d..ee22f54cb 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -101,7 +101,7 @@ def current_target_pos(self, odom_override: bool = True) -> Optional[np.ndarray] if current_waypoint.type == WaypointType.POST: return self.get_target_pos(f"fiducial{current_waypoint.tag_id}", in_odom) elif current_waypoint.type == WaypointType.MALLET: - return self.get_target_pos("mallet", in_odom) + return self.get_target_pos("hammer", in_odom) elif current_waypoint.type == WaypointType.WATER_BOTTLE: return self.get_target_pos("water_bottle", in_odom) else: From c35561f71a5fc00b1b71fb61d41e3c0890890dc5 Mon Sep 17 00:00:00 2001 From: Emerson-Ramey Date: Tue, 23 Jan 2024 19:16:24 -0500 Subject: [PATCH 038/126] change to MALLET --- src/navigation/approach_object.py | 2 +- src/navigation/context.py | 25 ++++++++++++------------- src/navigation/post_backup.py | 4 ++-- src/navigation/search.py | 12 ++++++------ src/navigation/waypoint.py | 6 +++--- 5 files changed, 24 insertions(+), 25 deletions(-) diff --git a/src/navigation/approach_object.py b/src/navigation/approach_object.py index c4e2c089e..55faf80ed 100644 --- a/src/navigation/approach_object.py +++ b/src/navigation/approach_object.py @@ -10,7 +10,7 @@ class ApproachObjectState(ApproachTargetBaseState): """ - State for when we see an object (rubber mallet or water bottle). + State for when we see an object (mallet or water bottle). We are only using the ZED camera. 
Transitions: -If arrived at target: DoneState diff --git a/src/navigation/context.py b/src/navigation/context.py index 0bb07e2ef..4c6b17312 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -63,7 +63,7 @@ class Environment: arrived_at_target: bool = None last_target_location: Optional[np.ndarray] = None # TODO add dictionary for long range tag id : (time received, our hit counter, bearing) - tag_data_dict = {} + # tag_data_dict = {} def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.ndarray]: """ @@ -162,8 +162,8 @@ def look_for_object(self) -> bool: waypoint = self.current_waypoint() if waypoint is not None: return ( - waypoint.type.val == mrover.msg.WaypointType.MALLET - or waypoint.type.val == mrover.msg.WaypointType.WATER_BOTTLE + waypoint.type.val == WaypointType.MALLET + or waypoint.type.val == WaypointType.WATER_BOTTLE ) else: return False @@ -222,7 +222,7 @@ class Context: vis_publisher: rospy.Publisher course_listener: rospy.Subscriber stuck_listener: rospy.Subscriber - long_range_cam_listener: rospy.Subscriber + # tag_data_listener: rospy.Subscriber # Use these as the primary interfaces in states course: Optional[Course] @@ -252,8 +252,7 @@ def __init__(self): self.world_frame = rospy.get_param("world_frame") self.odom_frame = rospy.get_param("odom_frame") self.rover_frame = rospy.get_param("rover_frame") - self.tag_data_listener = rospy.Subscriber("tag_data", list, self.tag_data_callback) - self.long_range_cam_listener = rospy.Subscriber("long_range_cam", Bool, self.long_range_cam_listener) + # self.tag_data_listener = rospy.Subscriber("tag_data", list, self.tag_data_callback) def recv_enable_auton(self, req: EnableAutonRequest) -> EnableAutonResponse: if req.enable: @@ -265,10 +264,10 @@ def recv_enable_auton(self, req: EnableAutonRequest) -> EnableAutonResponse: def stuck_callback(self, msg: Bool): self.rover.stuck = msg.data - def tag_data_callback(self, tags: list) -> None: - for tag in tags: - tag_id 
= tag["id"] - hit_count = tag["hitCount"] - bearing = tag["bearing"] - time_received = rospy.Time() - self.env.tag_data_dict[tag_id] = (time_received, hit_count, bearing) + # def tag_data_callback(self, tags: list) -> None: + # for tag in tags: + # tag_id = tag["id"] + # hit_count = tag["hitCount"] + # bearing = tag["bearing"] + # time_received = rospy.Time() + # self.env.tag_data_dict[tag_id] = (time_received, hit_count, bearing) diff --git a/src/navigation/post_backup.py b/src/navigation/post_backup.py index b7c51246f..d205cd5c9 100644 --- a/src/navigation/post_backup.py +++ b/src/navigation/post_backup.py @@ -99,12 +99,12 @@ def on_exit(self, context): self.traj = None def on_enter(self, context) -> None: - if context.env.last_post_location is None: + if context.env.last_target_location is None: self.traj = None else: self.traj = AvoidPostTrajectory.avoid_post_trajectory( context.rover.get_pose(), - context.env.last_post_location, + context.env.last_target_location, context.course.current_waypoint_pose().position, ) self.traj.cur_pt = 0 diff --git a/src/navigation/search.py b/src/navigation/search.py index 5e31537aa..f0afce082 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -4,8 +4,8 @@ from typing import Optional import numpy as np -from mrover.msg import GPSPointList -import mrover.msg +from mrover.msg import GPSPointList, WaypointType + from util.ros_utils import get_rosparam from util.state_lib.state import State @@ -96,8 +96,8 @@ def on_enter(self, context) -> None: search_center = context.course.current_waypoint() if not self.is_recovering: - # TODO give different parameters to spiral_traj based on if search_center.type is POST or RUBBER_MALLET (water bottle has diff search state) - if search_center.type == mrover.msg.WaypointType.POST: + # TODO give different parameters to spiral_traj based on if search_center.type is POST or MALLET (water bottle has diff search state) + if search_center.type == WaypointType.POST: self.traj = 
SearchTrajectory.spiral_traj( context.rover.get_pose().position[0:2], self.SPIRAL_COVERAGE_RADIUS, @@ -106,7 +106,7 @@ def on_enter(self, context) -> None: search_center.tag_id, ) - if mrover.msg.WaypointType.RUBBER_MALLET: + if WaypointType.MALLET: self.traj = SearchTrajectory.spiral_traj( context.rover.get_pose().position[0:2], self.SPIRAL_COVERAGE_RADIUS / 2, @@ -155,7 +155,7 @@ def on_loop(self, context) -> State: # if tag id has hit count > 3: # return long_range.LongRangeState() - # elif current waypoint type is RUBBER MALLET + # elif current waypoint type is MALLET # if context.env.current_target_pos() is not None and context.course.look_for_object(): # return approach_object.ApproachObjectState() # if we see the object return self diff --git a/src/navigation/waypoint.py b/src/navigation/waypoint.py index d07780563..122d3b6fe 100644 --- a/src/navigation/waypoint.py +++ b/src/navigation/waypoint.py @@ -30,8 +30,8 @@ def on_loop(self, context) -> State: return state.DoneState() # if we are at a post currently (from a previous leg), backup to avoid collision - if context.env.arrived_at_post: - context.env.arrived_at_post = False + if context.env.arrived_at_target: + context.env.arrived_at_target = False return post_backup.PostBackupState() if context.course.look_for_post(): @@ -57,7 +57,7 @@ def on_loop(self, context) -> State: if not context.course.look_for_post(): # We finished a regular waypoint, go onto the next one context.course.increment_waypoint() - else: # TODO: elif looking for post or rubber mallet + else: # TODO: elif looking for post or mallet # We finished a waypoint associated with a tag id, but we have not seen it yet. 
return search.SearchState() # TODO elif looking for water bottle: From 8ee12e7a1865968d761803b9445f0900504c9c89 Mon Sep 17 00:00:00 2001 From: Emerson-Ramey Date: Tue, 23 Jan 2024 19:54:33 -0500 Subject: [PATCH 039/126] edit search and waypoint --- src/navigation/search.py | 24 +++++++++++------------- src/navigation/waypoint.py | 17 ++++++++--------- 2 files changed, 19 insertions(+), 22 deletions(-) diff --git a/src/navigation/search.py b/src/navigation/search.py index f0afce082..95ec83ba1 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -10,7 +10,7 @@ from util.ros_utils import get_rosparam from util.state_lib.state import State -from navigation import approach_post, recovery, waypoint +from navigation import approach_post, approach_object, recovery, waypoint from navigation.context import convert_cartesian_to_gps from navigation.trajectory import Trajectory @@ -96,7 +96,6 @@ def on_enter(self, context) -> None: search_center = context.course.current_waypoint() if not self.is_recovering: - # TODO give different parameters to spiral_traj based on if search_center.type is POST or MALLET (water bottle has diff search state) if search_center.type == WaypointType.POST: self.traj = SearchTrajectory.spiral_traj( context.rover.get_pose().position[0:2], @@ -147,15 +146,14 @@ def on_loop(self, context) -> State: ) context.rover.send_drive_command(cmd_vel) - # TODO get current waypoint - # if current waypoint type is POST - if context.env.current_target_pos() is not None and context.course.look_for_post(): # indent this if statement - return approach_post.ApproachPostState() # if we see the tag in the ZED, go to ApproachPostState - # if we see the tag in the long range camera, go to LongRangeState - # if tag id has hit count > 3: - # return long_range.LongRangeState() - - # elif current waypoint type is MALLET - # if context.env.current_target_pos() is not None and context.course.look_for_object(): - # return approach_object.ApproachObjectState() # 
if we see the object + current_waypoint = context.course.current_waypoint() + if current_waypoint.type == "POST": + if context.env.current_target_pos() is not None and context.course.look_for_post(): + return approach_post.ApproachPostState() # if we see the tag in the ZED, go to ApproachPostState + # if we see the tag in the long range camera, go to LongRangeState + # if tag id has hit count > 3: + # return long_range.LongRangeState() + elif current_waypoint.type == "MALLET": + if context.env.current_target_pos() is not None and context.course.look_for_object(): + return approach_object.ApproachObjectState() # if we see the object return self diff --git a/src/navigation/waypoint.py b/src/navigation/waypoint.py index 122d3b6fe..de930f02e 100644 --- a/src/navigation/waypoint.py +++ b/src/navigation/waypoint.py @@ -3,7 +3,7 @@ from util.ros_utils import get_rosparam from util.state_lib.state import State -from navigation import search, recovery, approach_post, post_backup, state +from navigation import search, recovery, approach_post, post_backup, state, approach_object class WaypointState(State): @@ -36,13 +36,12 @@ def on_loop(self, context) -> State: if context.course.look_for_post(): if context.env.current_target_pos() is not None: - return approach_post.ApproachPostState() - # TODO: - # elif tag id has hit count > 3: + return approach_post.ApproachPostState() + # TODO: elif tag id has hit count > 3: # return long_range.LongRangeState() - # elif context.course.look_for_object(): - # if context.env.current_target_pos() is not None: - # return approach_object.ApproachObjectState() + elif context.course.look_for_object(): + if context.env.current_target_pos() is not None: + return approach_object.ApproachObjectState() # Attempt to find the waypoint in the TF tree and drive to it try: @@ -57,8 +56,8 @@ def on_loop(self, context) -> State: if not context.course.look_for_post(): # We finished a regular waypoint, go onto the next one context.course.increment_waypoint() 
- else: # TODO: elif looking for post or mallet - # We finished a waypoint associated with a tag id, but we have not seen it yet. + elif context.course.look_for_post() or context.course.look_for_object(): + # We finished a waypoint associated with a post or mallet, but we have not seen it yet. return search.SearchState() # TODO elif looking for water bottle: # return water_bottle_search.WaterBottleSearchState() From b97bc64ada490ec04f24ed46be16815e4594f0d1 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Thu, 25 Jan 2024 19:08:17 -0500 Subject: [PATCH 040/126] approach object (mallet) working --- scripts/debug_course_publisher.py | 33 +++++++++++++++----------- src/navigation/approach_target_base.py | 4 ++-- src/navigation/context.py | 11 ++++----- src/navigation/navigation.py | 19 ++++++++++++--- src/navigation/search.py | 6 ++--- src/navigation/waypoint.py | 8 +++---- 6 files changed, 48 insertions(+), 33 deletions(-) diff --git a/scripts/debug_course_publisher.py b/scripts/debug_course_publisher.py index 35162f32f..b329e8662 100755 --- a/scripts/debug_course_publisher.py +++ b/scripts/debug_course_publisher.py @@ -16,17 +16,22 @@ if __name__ == "__main__": rospy.init_node("debug_course_publisher") - publish_waypoints([convert_waypoint_to_gps(waypoint) for waypoint in [ - ( - Waypoint(tag_id=0, type=WaypointType(val=WaypointType.NO_SEARCH)), - SE3(position=np.array([4, 4, 0])), - ), - ( - Waypoint(tag_id=0, type=WaypointType(val=WaypointType.POST)), - SE3(position=np.array([-2, -2, 0])), - ), - ( - Waypoint(tag_id=1, type=WaypointType(val=WaypointType.POST)), - SE3(position=np.array([11, -10, 0])), - ) - ]]) + publish_waypoints( + [ + convert_waypoint_to_gps(waypoint) + for waypoint in [ + ( + Waypoint(tag_id=0, type=WaypointType(val=WaypointType.MALLET)), + SE3(position=np.array([8, 2, 0])), + ) + # ( + # Waypoint(tag_id=0, type=WaypointType(val=WaypointType.POST)), + # SE3(position=np.array([-2, -2, 0])), + # ), + # ( + # Waypoint(tag_id=1, 
type=WaypointType(val=WaypointType.POST)), + # SE3(position=np.array([11, -10, 0])), + # ) + ] + ] + ) diff --git a/src/navigation/approach_target_base.py b/src/navigation/approach_target_base.py index f0dbb4650..135acc1f9 100644 --- a/src/navigation/approach_target_base.py +++ b/src/navigation/approach_target_base.py @@ -46,10 +46,10 @@ def on_loop(self, context) -> State: self.DRIVE_FWD_THRESH, in_odom=context.use_odom, ) - next_state = self.determine_next(arrived) + next_state = self.determine_next(context, arrived) if arrived: context.env.arrived_at_target = True - context.env.last_target_location = self.get_target_pos() + context.env.last_target_location = self.get_target_pos(context) context.course.increment_waypoint() else: context.rover.send_drive_command(cmd_vel) diff --git a/src/navigation/context.py b/src/navigation/context.py index 4c6b17312..d9f9b8ea6 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -98,14 +98,14 @@ def current_target_pos(self, odom_override: bool = True) -> Optional[np.ndarray] print("CURRENT WAYPOINT IS NONE") return None - if current_waypoint.type == WaypointType.POST: + if current_waypoint.type.val == WaypointType.POST: return self.get_target_pos(f"fiducial{current_waypoint.tag_id}", in_odom) - elif current_waypoint.type == WaypointType.MALLET: + elif current_waypoint.type.val == WaypointType.MALLET: return self.get_target_pos("hammer", in_odom) elif current_waypoint.type == WaypointType.WATER_BOTTLE: return self.get_target_pos("water_bottle", in_odom) else: - print("CURRENT WAYPOINT IS NOT A POST OR OBJECT") + # print("CURRENT WAYPOINT IS NOT A POST OR OBJECT") return None @@ -161,10 +161,7 @@ def look_for_object(self) -> bool: """ waypoint = self.current_waypoint() if waypoint is not None: - return ( - waypoint.type.val == WaypointType.MALLET - or waypoint.type.val == WaypointType.WATER_BOTTLE - ) + return waypoint.type.val == WaypointType.MALLET or waypoint.type.val == WaypointType.WATER_BOTTLE else: 
return False diff --git a/src/navigation/navigation.py b/src/navigation/navigation.py index 82eeee331..300d87c08 100755 --- a/src/navigation/navigation.py +++ b/src/navigation/navigation.py @@ -14,6 +14,7 @@ from navigation.search import SearchState from navigation.state import DoneState, OffState, off_check from navigation.waypoint import WaypointState +from navigation.approach_object import ApproachObjectState class Navigation(threading.Thread): @@ -30,13 +31,25 @@ def __init__(self, context: Context): ) self.state_machine.add_transitions(PostBackupState(), [WaypointState(), RecoveryState()]) self.state_machine.add_transitions( - RecoveryState(), [WaypointState(), SearchState(), PostBackupState(), ApproachPostState()] + RecoveryState(), + [WaypointState(), SearchState(), PostBackupState(), ApproachPostState(), ApproachObjectState()], + ) + self.state_machine.add_transitions( + SearchState(), [ApproachPostState(), ApproachObjectState(), WaypointState(), RecoveryState()] ) - self.state_machine.add_transitions(SearchState(), [ApproachPostState(), WaypointState(), RecoveryState()]) self.state_machine.add_transitions(DoneState(), [WaypointState()]) self.state_machine.add_transitions( - WaypointState(), [PostBackupState(), ApproachPostState(), SearchState(), RecoveryState(), DoneState()] + WaypointState(), + [ + PostBackupState(), + ApproachPostState(), + ApproachObjectState(), + SearchState(), + RecoveryState(), + DoneState(), + ], ) + self.state_machine.add_transitions(ApproachObjectState(), [DoneState(), SearchState()]) self.state_machine.add_transitions(OffState(), [WaypointState(), DoneState()]) self.state_machine.configure_off_switch(OffState(), off_check) self.state_machine_server = StatePublisher(self.state_machine, "nav_structure", 1, "nav_state", 10) diff --git a/src/navigation/search.py b/src/navigation/search.py index 95ec83ba1..f94d36664 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -147,13 +147,13 @@ def on_loop(self, context) -> 
State: context.rover.send_drive_command(cmd_vel) current_waypoint = context.course.current_waypoint() - if current_waypoint.type == "POST": + if current_waypoint.type.val == WaypointType.POST: if context.env.current_target_pos() is not None and context.course.look_for_post(): return approach_post.ApproachPostState() # if we see the tag in the ZED, go to ApproachPostState # if we see the tag in the long range camera, go to LongRangeState # if tag id has hit count > 3: # return long_range.LongRangeState() - elif current_waypoint.type == "MALLET": + elif current_waypoint.type.val == WaypointType.MALLET: if context.env.current_target_pos() is not None and context.course.look_for_object(): - return approach_object.ApproachObjectState() # if we see the object + return approach_object.ApproachObjectState() # if we see the object return self diff --git a/src/navigation/waypoint.py b/src/navigation/waypoint.py index de930f02e..bad8b6cf4 100644 --- a/src/navigation/waypoint.py +++ b/src/navigation/waypoint.py @@ -36,11 +36,11 @@ def on_loop(self, context) -> State: if context.course.look_for_post(): if context.env.current_target_pos() is not None: - return approach_post.ApproachPostState() + return approach_post.ApproachPostState() # TODO: elif tag id has hit count > 3: # return long_range.LongRangeState() elif context.course.look_for_object(): - if context.env.current_target_pos() is not None: + if context.env.current_target_pos() is not None: return approach_object.ApproachObjectState() # Attempt to find the waypoint in the TF tree and drive to it @@ -53,14 +53,14 @@ def on_loop(self, context) -> State: self.DRIVE_FWD_THRESH, ) if arrived: - if not context.course.look_for_post(): + if not context.course.look_for_post() and not context.course.look_for_object(): # We finished a regular waypoint, go onto the next one context.course.increment_waypoint() elif context.course.look_for_post() or context.course.look_for_object(): # We finished a waypoint associated with a post or 
mallet, but we have not seen it yet. return search.SearchState() # TODO elif looking for water bottle: - # return water_bottle_search.WaterBottleSearchState() + # return water_bottle_search.WaterBottleSearchState() if context.rover.stuck: context.rover.previous_state = self From 987155a846f34921e8fd983d9394177277cb2f65 Mon Sep 17 00:00:00 2001 From: Ajay Sumanth Date: Thu, 25 Jan 2024 16:51:54 -0800 Subject: [PATCH 041/126] added target coord calculations --- .catkin_tools/CATKIN_IGNORE | 0 .catkin_tools/README | 13 +++++++++++ .catkin_tools/VERSION | 1 + .catkin_tools/profiles/default/build.yaml | 22 +++++++++++++++++++ .../catkin_tools_prebuild/package.xml | 10 +++++++++ 5 files changed, 46 insertions(+) create mode 100644 .catkin_tools/CATKIN_IGNORE create mode 100644 .catkin_tools/README create mode 100644 .catkin_tools/VERSION create mode 100644 .catkin_tools/profiles/default/build.yaml create mode 100644 .catkin_tools/profiles/default/packages/catkin_tools_prebuild/package.xml diff --git a/.catkin_tools/CATKIN_IGNORE b/.catkin_tools/CATKIN_IGNORE new file mode 100644 index 000000000..e69de29bb diff --git a/.catkin_tools/README b/.catkin_tools/README new file mode 100644 index 000000000..4706f4756 --- /dev/null +++ b/.catkin_tools/README @@ -0,0 +1,13 @@ +# Catkin Tools Metadata + +This directory was generated by catkin_tools and it contains persistent +configuration information used by the `catkin` command and its sub-commands. + +Each subdirectory of the `profiles` directory contains a set of persistent +configuration options for separate profiles. The default profile is called +`default`. If another profile is desired, it can be described in the +`profiles.yaml` file in this directory. + +Please see the catkin_tools documentation before editing any files in this +directory. Most actions can be performed with the `catkin` command-line +program. 
diff --git a/.catkin_tools/VERSION b/.catkin_tools/VERSION new file mode 100644 index 000000000..f76f91317 --- /dev/null +++ b/.catkin_tools/VERSION @@ -0,0 +1 @@ +0.9.2 \ No newline at end of file diff --git a/.catkin_tools/profiles/default/build.yaml b/.catkin_tools/profiles/default/build.yaml new file mode 100644 index 000000000..910ef4283 --- /dev/null +++ b/.catkin_tools/profiles/default/build.yaml @@ -0,0 +1,22 @@ +authors: [] +blacklist: [] +build_space: build +catkin_make_args: [] +cmake_args: [] +devel_layout: linked +devel_space: devel +extend_path: null +extends: null +install: false +install_space: install +isolate_install: false +jobs_args: [] +licenses: +- TODO +log_space: logs +maintainers: [] +make_args: [] +source_space: src +use_env_cache: false +use_internal_make_jobserver: true +whitelist: [] diff --git a/.catkin_tools/profiles/default/packages/catkin_tools_prebuild/package.xml b/.catkin_tools/profiles/default/packages/catkin_tools_prebuild/package.xml new file mode 100644 index 000000000..134c59ade --- /dev/null +++ b/.catkin_tools/profiles/default/packages/catkin_tools_prebuild/package.xml @@ -0,0 +1,10 @@ + + catkin_tools_prebuild + + This package is used to generate catkin setup files. 
+ + 0.0.0 + BSD + jbohren + catkin + From 6e9249693f61a1706f44a2ed5f3f66b457c76dd6 Mon Sep 17 00:00:00 2001 From: Ajay Sumanth Date: Thu, 25 Jan 2024 16:55:23 -0800 Subject: [PATCH 042/126] added target coord calculations --- src/navigation/long_range_state.py | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/src/navigation/long_range_state.py b/src/navigation/long_range_state.py index 13df495af..2bd9737ea 100644 --- a/src/navigation/long_range_state.py +++ b/src/navigation/long_range_state.py @@ -17,6 +17,7 @@ from state import ApproachTargetBaseState from state import approach_post import numpy as np +import math TAG_SEARCH_EXPIRATION = 3 @@ -40,10 +41,26 @@ def on_exit(self, context): def get_target_pos(self, context) -> Optional[int]: tag_id = context.current_waypoint()["tagid"] bearing = context.env.tag_data_dict[tag_id][2] - rover_position = context.rover.get_pose().position - target_pos_x = 90 - bearing - target_pos_y = bearing - target_pos_z = rover_position[2] + rover_position = context.rover.get_pose().rover_position + #target_pos = context.rover.get_target_pos().target_pos + + #Get x and y coords from pose + rover_position_x = rover_position[1][0][0] #if not, try 0,0 on 2d coords + rover_position_y = rover_position[0][1][0] + + #target_pos_x: target_pos[1][0][0] + #target_pos_y: target_pos[0][1][0] + + + target_position_x= math.sin(bearing)*25 + rover_position_x + target_position_y= math.cos(bearing)*25 + rover_position_y + target_pos = target_position_x,target_position_y + #Bearing and dist to coord: + + # target_pos_x = 90 - bearing + # target_pos_y = bearing + # target_pos_z = rover_position[2] + target_coordinates = np.ndarray context.tag_data_callback() return target_pos From 1cdc56160653080315e3a4afcbed8e1a26df2f0d Mon Sep 17 00:00:00 2001 From: Emerson-Ramey Date: Sat, 27 Jan 2024 04:52:51 +0000 Subject: [PATCH 043/126] update to approach water bottle too --- src/navigation/context.py | 2 +- 
src/navigation/search.py | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/navigation/context.py b/src/navigation/context.py index d9f9b8ea6..42b2cf09c 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -103,7 +103,7 @@ def current_target_pos(self, odom_override: bool = True) -> Optional[np.ndarray] elif current_waypoint.type.val == WaypointType.MALLET: return self.get_target_pos("hammer", in_odom) elif current_waypoint.type == WaypointType.WATER_BOTTLE: - return self.get_target_pos("water_bottle", in_odom) + return self.get_target_pos("bottle", in_odom) else: # print("CURRENT WAYPOINT IS NOT A POST OR OBJECT") return None diff --git a/src/navigation/search.py b/src/navigation/search.py index f94d36664..ab71505e7 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -96,7 +96,7 @@ def on_enter(self, context) -> None: search_center = context.course.current_waypoint() if not self.is_recovering: - if search_center.type == WaypointType.POST: + if search_center.type.val == WaypointType.POST: self.traj = SearchTrajectory.spiral_traj( context.rover.get_pose().position[0:2], self.SPIRAL_COVERAGE_RADIUS, @@ -104,8 +104,7 @@ def on_enter(self, context) -> None: self.SEGMENTS_PER_ROTATION, search_center.tag_id, ) - - if WaypointType.MALLET: + else: # water bottle or mallet self.traj = SearchTrajectory.spiral_traj( context.rover.get_pose().position[0:2], self.SPIRAL_COVERAGE_RADIUS / 2, @@ -153,7 +152,7 @@ def on_loop(self, context) -> State: # if we see the tag in the long range camera, go to LongRangeState # if tag id has hit count > 3: # return long_range.LongRangeState() - elif current_waypoint.type.val == WaypointType.MALLET: + else: if context.env.current_target_pos() is not None and context.course.look_for_object(): return approach_object.ApproachObjectState() # if we see the object return self From e006721adf5ccec392bfce689e4fc86b746155b4 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Sat, 27 Jan 
2024 14:10:06 -0500 Subject: [PATCH 044/126] fix bearing calc --- msg/LongRangeTag.msg | 7 ++---- .../long_range_tag_detector.cpp | 1 + .../long_range_tag_detector.hpp | 5 ++-- .../long_range_tag_detector.processing.cpp | 25 +++++++++---------- 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/msg/LongRangeTag.msg b/msg/LongRangeTag.msg index 6a8ea2cb2..ae4d49ab8 100644 --- a/msg/LongRangeTag.msg +++ b/msg/LongRangeTag.msg @@ -1,7 +1,4 @@ # Message published by the LongRangeTagDetector nodelet. -# -0.5 <= xOffset, yOffset <= 0.5 -# Normed to image size, and have signs reflecting typical cartesian coordinates -# Where (0, 0) is at the image center +# Bearing is in degrees from center of image uint8 id -float32 xOffset -float32 yOffset \ No newline at end of file +float32 bearing \ No newline at end of file diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index 29688efaa..e85126d8a 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -16,6 +16,7 @@ namespace mrover { mPnh.param("max_hit_count", mMaxHitCount, 5); mPnh.param("tag_increment_weight", mTagIncrementWeight, 2); mPnh.param("tag_decrement_weight", mTagDecrementWeight, 1); + mPnh.param("long_range_fov", mLongRangeFov, 80.0); mImgPub = mNh.advertise("long_range_tag_detection", 1); mLongRangeTagsPub = mNh.advertise("tags", 1); diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp index 4826554a8..cb0b4bf3d 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp @@ -32,6 +32,7 @@ namespace mrover { int mTagIncrementWeight{}; int mTagDecrementWeight{}; int mTagRemoveWeight{}; + 
float mLongRangeFov{}; cv::Ptr mDetectorParams; cv::Ptr mDictionary; @@ -126,7 +127,7 @@ namespace mrover { * @param tagCorners reference to tag corners, passed to @see getTagCenterPixels * @return float of tag bearing */ - float getTagBearing(std::vector& tagCorners) const; + float getTagBearing(cv::Point2f& tagCenter) const; /** * @see updateHitCounts() @@ -142,7 +143,7 @@ namespace mrover { * Publish the tags which have been detected for more than * mMinHitCountBeforePublish */ - void publishThresholdTags(); + void publishPermanentTags(); /** * publishes the thresholded tags onto an image using OpenCV diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index 30f36cecb..e54a78150 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -1,5 +1,6 @@ #include "long_range_tag_detector.hpp" #include +#include #include namespace mrover { @@ -28,7 +29,7 @@ namespace mrover { } //Publish all tags that meet threshold - //publishThresholdTags(); + publishPermanentTags(); //PUBLISH TAGS } @@ -50,7 +51,7 @@ namespace mrover { } void LongRangeTagDetectorNodelet::runTagDetection() { - std::cout << mDetectorParams->adaptiveThreshConstant << std::endl; + // std::cout << mDetectorParams->adaptiveThreshConstant << std::endl; cv::aruco::detectMarkers(mImg, mDictionary, mImmediateCorners, mImmediateIds, mDetectorParams); } @@ -59,7 +60,8 @@ namespace mrover { //loop through all identified IDs for (size_t i = 0; i < mImmediateIds.size(); i++) { updateNewlyIdentifiedTags(i); - std::cout << "bearing: " << getTagBearing(mImmediateCorners[i]) << "!!!" << std::endl; + cv::Point2f center = getTagCenterPixels(mImmediateCorners[i]); + // std::cout << "bearing: " << getTagBearing(center) << "!!!" 
<< std::endl; } //Now decrement all the hitcounts for tags that were not updated @@ -95,6 +97,7 @@ namespace mrover { lrt.updated = true; lrt.imageCenter = getNormedTagCenterOffset(tagCorners); + std::cout << "lrt image center " << lrt.imageCenter.x << std::endl; return lrt; } @@ -152,18 +155,16 @@ namespace mrover { return offsetCenterPoint; } - float LongRangeTagDetectorNodelet::getTagBearing(std::vector& tagCorners) const { + float LongRangeTagDetectorNodelet::getTagBearing(cv::Point2f& tagCenter) const { //for HD720 resolution - cv::Point2f center = getTagCenterPixels(tagCorners); auto imageWidth = (float) mImgMsg.width; - std::cout << "width: " << imageWidth << std::endl; - float angleOfFOV = 101; - float bearing = ((center.x - (imageWidth / 2)) / imageWidth) * angleOfFOV; - + std::cout << "width: " << imageWidth << " tag center x: " << tagCenter.x << std::endl; + float bearing = -1 * ((float) tagCenter.x + 0.5) * mLongRangeFov; + std::cout << "bearing: " << bearing << std::endl; return bearing; } - void LongRangeTagDetectorNodelet::publishThresholdTags() { + void LongRangeTagDetectorNodelet::publishPermanentTags() { //Loop through all the tagsj LongRangeTags tagsMessage; // @@ -174,9 +175,7 @@ namespace mrover { //Fill in fields newTagMessage.id = tag.second.id; - newTagMessage.xOffset = tag.second.imageCenter.x; - newTagMessage.yOffset = tag.second.imageCenter.y; - + newTagMessage.bearing = getTagBearing(tag.second.imageCenter); //Add to back of tagsMessage tagsMessage.longRangeTags.push_back(newTagMessage); } From 3eb039b5b79e1bfdf63d318affb9cbe30c370fac Mon Sep 17 00:00:00 2001 From: ankithu Date: Sat, 27 Jan 2024 14:33:30 -0500 Subject: [PATCH 045/126] navigation ready for long range testing --- src/navigation/context.py | 62 ++++++++++++++++++++++++------ src/navigation/long_range_state.py | 49 ++++++++++------------- src/navigation/search.py | 11 +++--- 3 files changed, 77 insertions(+), 45 deletions(-) diff --git a/src/navigation/context.py 
b/src/navigation/context.py index 42b2cf09c..5a9d4cce6 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -9,7 +9,15 @@ import rospy import tf2_ros from geometry_msgs.msg import Twist -from mrover.msg import Waypoint, GPSWaypoint, WaypointType, GPSPointList, Course as CourseMsg +from mrover.msg import ( + Waypoint, + GPSWaypoint, + WaypointType, + GPSPointList, + Course as CourseMsg, + LongRangeTag, + LongRangeTags, +) from mrover.srv import EnableAuton, EnableAutonRequest, EnableAutonResponse from navigation.drive import DriveController from std_msgs.msg import Time, Bool @@ -59,11 +67,10 @@ class Environment: """ ctx: Context + long_range_tags: LongRangeTagStore NO_FIDUCIAL: ClassVar[int] = -1 arrived_at_target: bool = None last_target_location: Optional[np.ndarray] = None - # TODO add dictionary for long range tag id : (time received, our hit counter, bearing) - # tag_data_dict = {} def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.ndarray]: """ @@ -109,6 +116,41 @@ def current_target_pos(self, odom_override: bool = True) -> Optional[np.ndarray] return None +class LongRangeTagStore: + @dataclass + class TagData: + hit_count: int + tag: LongRangeTag + + ctx: Context + __data: dict[int, TagData] + min_hits: int + + def __init__(self, ctx: Context, min_hits: int) -> None: + self.ctx = ctx + self.__data = {} + self.min_hits = min_hits + + def push_frame(self, tags: List[LongRangeTag]) -> None: + for _, cur_tag in self.__data.items(): + if cur_tag.tag.id not in tags: + cur_tag.hit_count -= 1 + if cur_tag.hit_count <= 0: + del self.__data[cur_tag.tag.id] + else: + cur_tag.hit_count += 1 + + for tag in tags: + if tag.id not in self.__data: + self.__data[tag.id] = self.TagData(hit_count=1, tag=tag) + + def get(self, tag_id: int) -> Optional[LongRangeTag]: + if tag_id in self.__data and self.__data[tag_id].hit_count > self.min_hits: + return self.__data[tag_id].tag + else: + return None + + @dataclass class Course: ctx: 
Context @@ -243,13 +285,14 @@ def __init__(self): self.stuck_listener = rospy.Subscriber("nav_stuck", Bool, self.stuck_callback) self.course = None self.rover = Rover(self, False, "") - self.env = Environment(self) + # TODO update min_hits to be a param + self.env = Environment(self, long_range_tags=LongRangeTagStore(self, 3)) self.disable_requested = False self.use_odom = rospy.get_param("use_odom_frame") self.world_frame = rospy.get_param("world_frame") self.odom_frame = rospy.get_param("odom_frame") self.rover_frame = rospy.get_param("rover_frame") - # self.tag_data_listener = rospy.Subscriber("tag_data", list, self.tag_data_callback) + self.tag_data_listener = rospy.Subscriber("tags", LongRangeTags, self.tag_data_callback) def recv_enable_auton(self, req: EnableAutonRequest) -> EnableAutonResponse: if req.enable: @@ -261,10 +304,5 @@ def recv_enable_auton(self, req: EnableAutonRequest) -> EnableAutonResponse: def stuck_callback(self, msg: Bool): self.rover.stuck = msg.data - # def tag_data_callback(self, tags: list) -> None: - # for tag in tags: - # tag_id = tag["id"] - # hit_count = tag["hitCount"] - # bearing = tag["bearing"] - # time_received = rospy.Time() - # self.env.tag_data_dict[tag_id] = (time_received, hit_count, bearing) + def tag_data_callback(self, tags: LongRangeTags) -> None: + self.env.long_range_tags.push_frame(tags.longRangeTags) diff --git a/src/navigation/long_range_state.py b/src/navigation/long_range_state.py index 2bd9737ea..74149daf2 100644 --- a/src/navigation/long_range_state.py +++ b/src/navigation/long_range_state.py @@ -6,6 +6,7 @@ from context import Context from util.state_lib.state import State from util.state_lib.state_machine import StateMachine +from util.np_utils import normalize from abc import ABC, abstractmethod from state import DoneState from search import SearchState @@ -19,8 +20,6 @@ import numpy as np import math -TAG_SEARCH_EXPIRATION = 3 - class LongRangeState(ApproachTargetBaseState): """ @@ -39,39 +38,33 @@ def 
on_exit(self, context): pass def get_target_pos(self, context) -> Optional[int]: - tag_id = context.current_waypoint()["tagid"] - bearing = context.env.tag_data_dict[tag_id][2] - rover_position = context.rover.get_pose().rover_position - #target_pos = context.rover.get_target_pos().target_pos - - #Get x and y coords from pose - rover_position_x = rover_position[1][0][0] #if not, try 0,0 on 2d coords - rover_position_y = rover_position[0][1][0] + tag_id = context.course.get_current_waypoint().tag_id + tag = context.env.long_range_tags.get(tag_id) + if tag is None: + return None + pose = context.rover.get_pose() + if pose is None: + return None - #target_pos_x: target_pos[1][0][0] - #target_pos_y: target_pos[0][1][0] + rover_position = pose.position + rover_direction = pose.rotation.direction_vector() + bearing_to_tag = np.radians(tag.bearing) - target_position_x= math.sin(bearing)*25 + rover_position_x - target_position_y= math.cos(bearing)*25 + rover_position_y - target_pos = target_position_x,target_position_y - #Bearing and dist to coord: + # rover_direction rotated by bearing to tag + bearing_rotation_mat = np.array( + [[np.cos(bearing_to_tag), -np.sin(bearing_to_tag)], [np.sin(bearing_to_tag), np.cos(bearing_to_tag)]] + ) - # target_pos_x = 90 - bearing - # target_pos_y = bearing - # target_pos_z = rover_position[2] + direction_to_tag = bearing_rotation_mat @ rover_direction[:2] - target_coordinates = np.ndarray - context.tag_data_callback() - return target_pos + direction_to_tag = normalize(direction_to_tag) + distance = 20 # TODO replace with rosparam + tag_position = rover_position + direction_to_tag * distance def determine_next(self, context, finished: bool) -> State: - fid_pos = context.env.current_fid_pos() # Need to pass in the fid_id - tag_time = context.env.tag_data_dict[context.current_tag_id][0] + fid_pos = context.env.current_fid_pos() if fid_pos is None: - if rospy.Time() - tag_time > TAG_SEARCH_EXPIRATION: - return state.SearchState() - else: - 
return state.LongRangeState() + return state.LongRangeState() else: return approach_post.ApproachPostState() diff --git a/src/navigation/search.py b/src/navigation/search.py index ab71505e7..e08f32c28 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -104,7 +104,7 @@ def on_enter(self, context) -> None: self.SEGMENTS_PER_ROTATION, search_center.tag_id, ) - else: # water bottle or mallet + else: # water bottle or mallet self.traj = SearchTrajectory.spiral_traj( context.rover.get_pose().position[0:2], self.SPIRAL_COVERAGE_RADIUS / 2, @@ -147,11 +147,12 @@ def on_loop(self, context) -> State: current_waypoint = context.course.current_waypoint() if current_waypoint.type.val == WaypointType.POST: + # if we see the tag in the ZED, go to ApproachPostState if context.env.current_target_pos() is not None and context.course.look_for_post(): - return approach_post.ApproachPostState() # if we see the tag in the ZED, go to ApproachPostState - # if we see the tag in the long range camera, go to LongRangeState - # if tag id has hit count > 3: - # return long_range.LongRangeState() + return approach_post.ApproachPostState() + # if we see the tag in the long range camera, go to LongRangeState + if context.env.long_range_tags.get(current_waypoint.tag_id) is not None: + return approach_post.LongRangeState() else: if context.env.current_target_pos() is not None and context.course.look_for_object(): return approach_object.ApproachObjectState() # if we see the object From 16e4b13d96ec102e3c39205396b512c6088010b7 Mon Sep 17 00:00:00 2001 From: ankithu Date: Sun, 28 Jan 2024 12:50:09 -0500 Subject: [PATCH 046/126] max hit count --- src/navigation/context.py | 7 +++++-- src/navigation/long_range_state.py | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/navigation/context.py b/src/navigation/context.py index 5a9d4cce6..d86b4a94b 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -121,15 +121,17 @@ class LongRangeTagStore: 
class TagData: hit_count: int tag: LongRangeTag + time: rospy.Time ctx: Context __data: dict[int, TagData] min_hits: int - def __init__(self, ctx: Context, min_hits: int) -> None: + def __init__(self, ctx: Context, min_hits: int, max_hits: int = 10) -> None: self.ctx = ctx self.__data = {} self.min_hits = min_hits + self.max_hits = max_hits def push_frame(self, tags: List[LongRangeTag]) -> None: for _, cur_tag in self.__data.items(): @@ -139,13 +141,14 @@ def push_frame(self, tags: List[LongRangeTag]) -> None: del self.__data[cur_tag.tag.id] else: cur_tag.hit_count += 1 + cur_tag.hit_count = min(cur_tag.hit_count, self.max_hits) for tag in tags: if tag.id not in self.__data: self.__data[tag.id] = self.TagData(hit_count=1, tag=tag) def get(self, tag_id: int) -> Optional[LongRangeTag]: - if tag_id in self.__data and self.__data[tag_id].hit_count > self.min_hits: + if tag_id in self.__data and self.__data[tag_id].hit_count >= self.min_hits: return self.__data[tag_id].tag else: return None diff --git a/src/navigation/long_range_state.py b/src/navigation/long_range_state.py index 74149daf2..a9ee40e27 100644 --- a/src/navigation/long_range_state.py +++ b/src/navigation/long_range_state.py @@ -61,6 +61,7 @@ def get_target_pos(self, context) -> Optional[int]: direction_to_tag = normalize(direction_to_tag) distance = 20 # TODO replace with rosparam tag_position = rover_position + direction_to_tag * distance + return tag_position def determine_next(self, context, finished: bool) -> State: fid_pos = context.env.current_fid_pos() From bf08fc524cee12c3bbcd8cd19cefd4561c246977 Mon Sep 17 00:00:00 2001 From: Stephen Barstys Date: Sun, 28 Jan 2024 13:13:52 -0500 Subject: [PATCH 047/126] time threshold check for LongRangeTagData --- config/navigation.yaml | 6 +++++- src/navigation/context.py | 10 +++++++++- src/navigation/long_range_state.py | 4 +++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/config/navigation.yaml b/config/navigation.yaml index 
fe5a6adfb..153a40dc7 100644 --- a/config/navigation.yaml +++ b/config/navigation.yaml @@ -46,5 +46,9 @@ failure_identification: watchdog: window_size: 100 #size of window we are looking at for being stuck - angular_threshold: 0.1 + angular_threshold: 0.1 linear_threshold: 0.2 + +long_range: + distance_ahead: 20 + time_threshold: 5 diff --git a/src/navigation/context.py b/src/navigation/context.py index d86b4a94b..ba963f426 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -23,9 +23,12 @@ from std_msgs.msg import Time, Bool from util.SE3 import SE3 from visualization_msgs.msg import Marker +from util.ros_utils import get_rosparam TAG_EXPIRATION_TIME_SECONDS = 60 +TIME_THRESHOLD = get_rosparam("long_range/time_threshold", 5) + REF_LAT = rospy.get_param("gps_linearization/reference_point_latitude") REF_LON = rospy.get_param("gps_linearization/reference_point_longitude") @@ -148,7 +151,12 @@ def push_frame(self, tags: List[LongRangeTag]) -> None: self.__data[tag.id] = self.TagData(hit_count=1, tag=tag) def get(self, tag_id: int) -> Optional[LongRangeTag]: - if tag_id in self.__data and self.__data[tag_id].hit_count >= self.min_hits: + time_difference = rospy.get_time() - self.__data[tag_id].time + if ( + tag_id in self.__data + and self.__data[tag_id].hit_count >= self.min_hits + and time_difference <= TIME_THRESHOLD + ): return self.__data[tag_id].tag else: return None diff --git a/src/navigation/long_range_state.py b/src/navigation/long_range_state.py index a9ee40e27..6504dd17d 100644 --- a/src/navigation/long_range_state.py +++ b/src/navigation/long_range_state.py @@ -20,6 +20,8 @@ import numpy as np import math +DIST_AHEAD = get_rosparam("long_range/distance_ahead", 20) + class LongRangeState(ApproachTargetBaseState): """ @@ -59,7 +61,7 @@ def get_target_pos(self, context) -> Optional[int]: direction_to_tag = bearing_rotation_mat @ rover_direction[:2] direction_to_tag = normalize(direction_to_tag) - distance = 20 # TODO replace with rosparam 
+ distance = DIST_AHEAD tag_position = rover_position + direction_to_tag * distance return tag_position From 0a5cef58fbfd28b90d99eba2df8089a3ea3251a6 Mon Sep 17 00:00:00 2001 From: ankithu Date: Sun, 28 Jan 2024 13:52:52 -0500 Subject: [PATCH 048/126] fix mypy errors --- src/navigation/context.py | 4 +- src/navigation/long_range.py | 57 +++++++++++++++++------ src/navigation/long_range_state.py | 73 ------------------------------ src/navigation/search.py | 4 +- src/navigation/waypoint.py | 4 +- 5 files changed, 50 insertions(+), 92 deletions(-) delete mode 100644 src/navigation/long_range_state.py diff --git a/src/navigation/context.py b/src/navigation/context.py index ba963f426..fa68db9c5 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -72,7 +72,7 @@ class Environment: ctx: Context long_range_tags: LongRangeTagStore NO_FIDUCIAL: ClassVar[int] = -1 - arrived_at_target: bool = None + arrived_at_target: bool = False last_target_location: Optional[np.ndarray] = None def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.ndarray]: @@ -148,7 +148,7 @@ def push_frame(self, tags: List[LongRangeTag]) -> None: for tag in tags: if tag.id not in self.__data: - self.__data[tag.id] = self.TagData(hit_count=1, tag=tag) + self.__data[tag.id] = self.TagData(hit_count=1, tag=tag, time=rospy.get_time()) def get(self, tag_id: int) -> Optional[LongRangeTag]: time_difference = rospy.get_time() - self.__data[tag_id].time diff --git a/src/navigation/long_range.py b/src/navigation/long_range.py index 76c0d4949..500511517 100644 --- a/src/navigation/long_range.py +++ b/src/navigation/long_range.py @@ -1,12 +1,26 @@ import tf2_ros - +import rospy +from aenum import Enum, NoAlias +from geometry_msgs.msg import Twist from util.ros_utils import get_rosparam +from context import Context from util.state_lib.state import State -from abc import abstractmethod +from util.state_lib.state_machine import StateMachine +from util.np_utils import normalized 
+from abc import ABC, abstractmethod +from state import DoneState +from search import SearchState +from util.state_lib.state_publisher_server import StatePublisher from typing import Optional +from util.state_lib.state import State +from abc import abstractmethod +import state +from state import ApproachTargetBaseState +from state import approach_post +import numpy as np +import math -from navigation import approach_post -from navigation.approach_target_base import ApproachTargetBaseState +DIST_AHEAD = get_rosparam("long_range/distance_ahead", 20) class LongRangeState(ApproachTargetBaseState): @@ -26,17 +40,34 @@ def on_exit(self, context): pass def get_target_pos(self, context) -> Optional[int]: - target_pos = None # TODO: get bearing from most recent message for current tag_id and create a “target” location to drive to by setting a point a certain look ahead distance far away from the rover (20-30 meters) once aligned with the bearing - return target_pos + tag_id = context.course.get_current_waypoint().tag_id + tag = context.env.long_range_tags.get(tag_id) + if tag is None: + return None + pose = context.rover.get_pose() + if pose is None: + return None + + rover_position = pose.position + rover_direction = pose.rotation.direction_vector() + + bearing_to_tag = np.radians(tag.bearing) + + # rover_direction rotated by bearing to tag + bearing_rotation_mat = np.array( + [[np.cos(bearing_to_tag), -np.sin(bearing_to_tag)], [np.sin(bearing_to_tag), np.cos(bearing_to_tag)]] + ) + + direction_to_tag = bearing_rotation_mat @ rover_direction[:2] + + direction_to_tag = normalized(direction_to_tag) + distance = DIST_AHEAD + tag_position = rover_position + direction_to_tag * distance + return tag_position def determine_next(self, context, finished: bool) -> State: - # if we see the tag in the ZED transition to approach post - fid_pos = context.env.current_target_pos() + fid_pos = context.env.current_fid_pos() if fid_pos is None: - # if we haven’t seen the tag in the long 
range cam in the last 3 seconds, go back to search - # TODO if time.now - last message received for tag_id > 3: - # return search.SearchState() - # otherwise continue in long range state - return self + return state.LongRangeState() else: return approach_post.ApproachPostState() diff --git a/src/navigation/long_range_state.py b/src/navigation/long_range_state.py deleted file mode 100644 index 6504dd17d..000000000 --- a/src/navigation/long_range_state.py +++ /dev/null @@ -1,73 +0,0 @@ -import tf2_ros -import rospy -from aenum import Enum, NoAlias -from geometry_msgs.msg import Twist -from util.ros_utils import get_rosparam -from context import Context -from util.state_lib.state import State -from util.state_lib.state_machine import StateMachine -from util.np_utils import normalize -from abc import ABC, abstractmethod -from state import DoneState -from search import SearchState -from util.state_lib.state_publisher_server import StatePublisher -from typing import Optional -from util.state_lib.state import State -from abc import abstractmethod -import state -from state import ApproachTargetBaseState -from state import approach_post -import numpy as np -import math - -DIST_AHEAD = get_rosparam("long_range/distance_ahead", 20) - - -class LongRangeState(ApproachTargetBaseState): - """ - State for when the tag is seen only in the long range camera. - Transitions: - -If tag seen in ZED: ApproachPostState - -Not seen in ZED and moved: LongRangeState - -Don't see the tag in long range camera: SearchState - -Stuck? 
- """ - - def on_enter(self, context): - pass - - def on_exit(self, context): - pass - - def get_target_pos(self, context) -> Optional[int]: - tag_id = context.course.get_current_waypoint().tag_id - tag = context.env.long_range_tags.get(tag_id) - if tag is None: - return None - pose = context.rover.get_pose() - if pose is None: - return None - - rover_position = pose.position - rover_direction = pose.rotation.direction_vector() - - bearing_to_tag = np.radians(tag.bearing) - - # rover_direction rotated by bearing to tag - bearing_rotation_mat = np.array( - [[np.cos(bearing_to_tag), -np.sin(bearing_to_tag)], [np.sin(bearing_to_tag), np.cos(bearing_to_tag)]] - ) - - direction_to_tag = bearing_rotation_mat @ rover_direction[:2] - - direction_to_tag = normalize(direction_to_tag) - distance = DIST_AHEAD - tag_position = rover_position + direction_to_tag * distance - return tag_position - - def determine_next(self, context, finished: bool) -> State: - fid_pos = context.env.current_fid_pos() - if fid_pos is None: - return state.LongRangeState() - else: - return approach_post.ApproachPostState() diff --git a/src/navigation/search.py b/src/navigation/search.py index 0500c74a6..3db5ede04 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -10,7 +10,7 @@ from util.ros_utils import get_rosparam from util.state_lib.state import State -from navigation import approach_post, approach_object, recovery, waypoint +from navigation import approach_post, approach_object, recovery, waypoint, long_range from navigation.context import convert_cartesian_to_gps from navigation.trajectory import Trajectory @@ -152,7 +152,7 @@ def on_loop(self, context) -> State: # return approach_post.ApproachPostState() # if we see the tag in the long range camera, go to LongRangeState if context.env.long_range_tags.get(current_waypoint.tag_id) is not None: - return approach_post.LongRangeState() + return long_range.LongRangeState() else: if context.env.current_target_pos() is not None and 
context.course.look_for_object(): return approach_object.ApproachObjectState() # if we see the object diff --git a/src/navigation/waypoint.py b/src/navigation/waypoint.py index a03b2fa84..62181a5c6 100644 --- a/src/navigation/waypoint.py +++ b/src/navigation/waypoint.py @@ -3,7 +3,7 @@ from util.ros_utils import get_rosparam from util.state_lib.state import State -from navigation import search, recovery, approach_post, post_backup, state, approach_object +from navigation import search, recovery, approach_post, post_backup, state, approach_object, long_range class WaypointState(State): @@ -39,7 +39,7 @@ def on_loop(self, context) -> State: # return approach_post.ApproachPostState() # if we see the tag in the long range camera, go to LongRangeState if context.env.long_range_tags.get(current_waypoint.tag_id) is not None: - return approach_post.LongRangeState() + return long_range.LongRangeState() elif context.course.look_for_object(): if context.env.current_target_pos() is not None: return approach_object.ApproachObjectState() From 5b5f4f11309dc4a1d5d0a9275e0c50a17cce4223 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Mon, 29 Jan 2024 21:25:29 -0500 Subject: [PATCH 049/126] merge long range with new states --- launch/perception.launch | 9 +- plugins/tag_detector_plugin.xml | 2 +- src/perception/long_range_cam/main.cpp | 23 ++ src/perception/long_range_cam/pch.hpp | 2 - .../tag_detector/long_range_cam/main.cpp | 22 ++ .../tag_detector/long_range_cam/pch.hpp | 2 - .../tag_detector/{ => zed}/main.cpp | 0 .../zed/tag_detector.processing.cpp | 346 +++++++++--------- 8 files changed, 226 insertions(+), 180 deletions(-) create mode 100644 src/perception/long_range_cam/main.cpp create mode 100644 src/perception/tag_detector/long_range_cam/main.cpp rename src/perception/tag_detector/{ => zed}/main.cpp (100%) diff --git a/launch/perception.launch b/launch/perception.launch index 0fcce3c20..67f0e5aeb 100644 --- a/launch/perception.launch +++ b/launch/perception.launch @@ 
-2,7 +2,12 @@ + + + pkg="nodelet" type="nodelet" name="long_range_tag_detector" + args="load mrover/LongRangeTagDetectorNodelet nodelet_manager" output="screen" /> + \ No newline at end of file diff --git a/plugins/tag_detector_plugin.xml b/plugins/tag_detector_plugin.xml index bd6238412..babad0bed 100644 --- a/plugins/tag_detector_plugin.xml +++ b/plugins/tag_detector_plugin.xml @@ -1,4 +1,4 @@ - + diff --git a/src/perception/long_range_cam/main.cpp b/src/perception/long_range_cam/main.cpp new file mode 100644 index 000000000..0e5e833da --- /dev/null +++ b/src/perception/long_range_cam/main.cpp @@ -0,0 +1,23 @@ +#include "long_range_cam.hpp" + +#ifdef MROVER_IS_NODELET + +#include +PLUGINLIB_EXPORT_CLASS(mrover::LongRangeCamNodelet, nodelet::Nodelet) + +#else + +int main(int argc, char** argv) { + ros::init(argc, argv, "usb_camera"); + + // Start the ZED Nodelet + nodelet::Loader nodelet; + std::cout << ros::this_node::getName() << std::endl; + nodelet.load(ros::this_node::getName(), "mrover/LongRangeCamNodelet", ros::names::getRemappings(), {}); + + ros::spin(); + + return EXIT_SUCCESS; +} + +#endif \ No newline at end of file diff --git a/src/perception/long_range_cam/pch.hpp b/src/perception/long_range_cam/pch.hpp index d39a1b1aa..f0c16770d 100644 --- a/src/perception/long_range_cam/pch.hpp +++ b/src/perception/long_range_cam/pch.hpp @@ -13,8 +13,6 @@ #include #include -#include - #include #include #include diff --git a/src/perception/tag_detector/long_range_cam/main.cpp b/src/perception/tag_detector/long_range_cam/main.cpp new file mode 100644 index 000000000..d62371420 --- /dev/null +++ b/src/perception/tag_detector/long_range_cam/main.cpp @@ -0,0 +1,22 @@ +#include "long_range_tag_detector.hpp" + +#ifdef MROVER_IS_NODELET + +#include +PLUGINLIB_EXPORT_CLASS(mrover::LongRangeTagDetectorNodelet, nodelet::Nodelet) + +#else + +int main(int argc, char** argv) { + ros::init(argc, argv, "long_range_tag_detector"); + + // Start the long range cam Nodelet + 
nodelet::Loader nodelet; + nodelet.load(ros::this_node::getName(), "mrover/LongRangeTagDetectorNodelet", ros::names::getRemappings(), {}); + + ros::spin(); + + return EXIT_SUCCESS; +} + +#endif \ No newline at end of file diff --git a/src/perception/tag_detector/long_range_cam/pch.hpp b/src/perception/tag_detector/long_range_cam/pch.hpp index fad6365fd..afaebe976 100644 --- a/src/perception/tag_detector/long_range_cam/pch.hpp +++ b/src/perception/tag_detector/long_range_cam/pch.hpp @@ -12,8 +12,6 @@ #include #include -#include - #include #include #include diff --git a/src/perception/tag_detector/main.cpp b/src/perception/tag_detector/zed/main.cpp similarity index 100% rename from src/perception/tag_detector/main.cpp rename to src/perception/tag_detector/zed/main.cpp diff --git a/src/perception/tag_detector/zed/tag_detector.processing.cpp b/src/perception/tag_detector/zed/tag_detector.processing.cpp index 18e7fcdb9..a101d4fff 100644 --- a/src/perception/tag_detector/zed/tag_detector.processing.cpp +++ b/src/perception/tag_detector/zed/tag_detector.processing.cpp @@ -1,192 +1,192 @@ #include "tag_detector.hpp" -#include "../../point.hpp" +#include "point.hpp" namespace mrover { -/** + /** * @brief Retrieve the pose of the tag in camera space * @param msg 3D Point Cloud with points stored relative to the camera * @param u X Pixel Position * @param v Y Pixel Position */ -std::optional TagDetectorNodelet::getTagInCamFromPixel( - sensor_msgs::PointCloud2ConstPtr const &cloudPtr, size_t u, size_t v) { - assert(cloudPtr); - - if (u >= cloudPtr->width || v >= cloudPtr->height) { - NODELET_WARN("Tag center out of bounds: [%zu %zu]", u, v); - return std::nullopt; - } - - Point point = reinterpret_cast( - cloudPtr->data.data())[u + v * cloudPtr->width]; - - if (!std::isfinite(point.x) || !std::isfinite(point.y) || - !std::isfinite(point.z)) { - NODELET_WARN("Tag center point not finite: [%f %f %f]", point.x, point.y, - point.z); - return std::nullopt; - } - - return 
std::make_optional(R3{point.x, point.y, point.z}, SO3{}); -} + std::optional TagDetectorNodelet::getTagInCamFromPixel( + sensor_msgs::PointCloud2ConstPtr const& cloudPtr, size_t u, size_t v) { + assert(cloudPtr); + + if (u >= cloudPtr->width || v >= cloudPtr->height) { + NODELET_WARN("Tag center out of bounds: [%zu %zu]", u, v); + return std::nullopt; + } + + Point point = reinterpret_cast( + cloudPtr->data.data())[u + v * cloudPtr->width]; + + if (!std::isfinite(point.x) || !std::isfinite(point.y) || + !std::isfinite(point.z)) { + NODELET_WARN("Tag center point not finite: [%f %f %f]", point.x, point.y, + point.z); + return std::nullopt; + } + + return std::make_optional(R3{point.x, point.y, point.z}, SO3{}); + } -/** + /** * For each tag we have detected so far, fuse point cloud information. * This information is where it is in the world. * * @param msg Point cloud message */ -void TagDetectorNodelet::pointCloudCallback( - sensor_msgs::PointCloud2ConstPtr const &msg) { - assert(msg); - assert(msg->height > 0); - assert(msg->width > 0); - - if (!mEnableDetections) - return; - - mProfiler.beginLoop(); - - NODELET_DEBUG("Got point cloud %d", msg->header.seq); - - // OpenCV needs a dense BGR image |BGR|...| but out point cloud is - // |BGRAXYZ...|...| So we need to copy the data into the correct format - if (static_cast(msg->height) != mImg.rows || - static_cast(msg->width) != mImg.cols) { - NODELET_INFO("Image size changed from [%d %d] to [%u %u]", mImg.cols, - mImg.rows, msg->width, msg->height); - mImg = cv::Mat{static_cast(msg->height), static_cast(msg->width), - CV_8UC3, cv::Scalar{0, 0, 0}}; - } - auto *pixelPtr = reinterpret_cast(mImg.data); - auto *pointPtr = reinterpret_cast(msg->data.data()); - std::for_each(std::execution::par_unseq, pixelPtr, pixelPtr + mImg.total(), - [&](cv::Vec3b &pixel) { - size_t i = &pixel - pixelPtr; - pixel[0] = pointPtr[i].b; - pixel[1] = pointPtr[i].g; - pixel[2] = pointPtr[i].r; - }); - mProfiler.measureEvent("Convert"); - - // 
Call thresholding - publishThresholdedImage(); - mProfiler.measureEvent("Threshold"); - - // Detect the tag vertices in screen space and their respective ids - // {mImmediateCorneres, mImmediateIds} are the outputs from OpenCV - cv::aruco::detectMarkers(mImg, mDictionary, mImmediateCorners, mImmediateIds, - mDetectorParams); - NODELET_DEBUG("OpenCV detect size: %zu", mImmediateIds.size()); - mProfiler.measureEvent("OpenCV Detect"); - - // Update ID, image center, and increment hit count for all detected tags - for (size_t i = 0; i < mImmediateIds.size(); ++i) { - int id = mImmediateIds[i]; - Tag &tag = mTags[id]; - tag.hitCount = - std::clamp(tag.hitCount + mTagIncrementWeight, 0, mMaxHitCount); - tag.id = id; - tag.imageCenter = - std::reduce(mImmediateCorners[i].begin(), mImmediateCorners[i].end()) / - static_cast(mImmediateCorners[i].size()); - tag.tagInCam = getTagInCamFromPixel(msg, std::lround(tag.imageCenter.x), - std::lround(tag.imageCenter.y)); - - if (tag.tagInCam) { - // Publish tag to immediate - std::string immediateFrameId = - "immediateFiducial" + std::to_string(tag.id); - SE3::pushToTfTree(mTfBroadcaster, immediateFrameId, mCameraFrameId, - tag.tagInCam.value()); - } - } - - // Handle tags that were not seen this update - // Decrement their hit count and remove if they hit zero - auto it = mTags.begin(); - while (it != mTags.end()) { - auto &[id, tag] = *it; - if (std::ranges::find(mImmediateIds, id) == mImmediateIds.end()) { - tag.hitCount -= mTagDecrementWeight; - tag.tagInCam = std::nullopt; - if (tag.hitCount <= 0) { - it = mTags.erase(it); - continue; - } - } - ++it; - } - - // Publish all tags to the tf tree that have been seen enough times - for (auto const &[id, tag] : mTags) { - if (tag.hitCount >= mMinHitCountBeforePublish && tag.tagInCam) { - try { - std::string immediateFrameId = - "immediateFiducial" + std::to_string(tag.id); - // Publish tag to odom - std::string const &parentFrameId = - mUseOdom ? 
mOdomFrameId : mMapFrameId; - SE3 tagInParent = - SE3::fromTfTree(mTfBuffer, parentFrameId, immediateFrameId); - SE3::pushToTfTree(mTfBroadcaster, "fiducial" + std::to_string(id), - parentFrameId, tagInParent); - } catch (tf2::ExtrapolationException const &) { - NODELET_WARN("Old data for immediate tag"); - } catch (tf2::LookupException const &) { - NODELET_WARN("Expected transform for immediate tag"); - } catch (tf::ConnectivityException const &) { - NODELET_WARN( - "Expected connection to odom frame. Is visual odometry running?"); - } - } - } - - if (mPublishImages && mImgPub.getNumSubscribers()) { - - cv::aruco::drawDetectedMarkers(mImg, mImmediateCorners, mImmediateIds); - // Max number of tags the hit counter can display = 10; - if (!mTags.empty()) { - // TODO: remove some magic numbers in this block - int tagCount = 1; - auto tagBoxWidth = static_cast(mImg.cols / (mTags.size() * 2)); - for (auto &[id, tag] : mTags) { - cv::Scalar color{255, 0, 0}; - cv::Point pt{tagBoxWidth * tagCount, mImg.rows / 10}; - std::string text = - "id" + std::to_string(id) + ":" + std::to_string(tag.hitCount); - cv::putText(mImg, text, pt, cv::FONT_HERSHEY_COMPLEX, mImg.cols / 800.0, - color, mImg.cols / 300); - - ++tagCount; - } + void TagDetectorNodelet::pointCloudCallback( + sensor_msgs::PointCloud2ConstPtr const& msg) { + assert(msg); + assert(msg->height > 0); + assert(msg->width > 0); + + if (!mEnableDetections) + return; + + mProfiler.beginLoop(); + + NODELET_DEBUG("Got point cloud %d", msg->header.seq); + + // OpenCV needs a dense BGR image |BGR|...| but out point cloud is + // |BGRAXYZ...|...| So we need to copy the data into the correct format + if (static_cast(msg->height) != mImg.rows || + static_cast(msg->width) != mImg.cols) { + NODELET_INFO("Image size changed from [%d %d] to [%u %u]", mImg.cols, + mImg.rows, msg->width, msg->height); + mImg = cv::Mat{static_cast(msg->height), static_cast(msg->width), + CV_8UC3, cv::Scalar{0, 0, 0}}; + } + auto* pixelPtr = 
reinterpret_cast(mImg.data); + auto* pointPtr = reinterpret_cast(msg->data.data()); + std::for_each(std::execution::par_unseq, pixelPtr, pixelPtr + mImg.total(), + [&](cv::Vec3b& pixel) { + size_t i = &pixel - pixelPtr; + pixel[0] = pointPtr[i].b; + pixel[1] = pointPtr[i].g; + pixel[2] = pointPtr[i].r; + }); + mProfiler.measureEvent("Convert"); + + // Call thresholding + publishThresholdedImage(); + mProfiler.measureEvent("Threshold"); + + // Detect the tag vertices in screen space and their respective ids + // {mImmediateCorneres, mImmediateIds} are the outputs from OpenCV + cv::aruco::detectMarkers(mImg, mDictionary, mImmediateCorners, mImmediateIds, + mDetectorParams); + NODELET_DEBUG("OpenCV detect size: %zu", mImmediateIds.size()); + mProfiler.measureEvent("OpenCV Detect"); + + // Update ID, image center, and increment hit count for all detected tags + for (size_t i = 0; i < mImmediateIds.size(); ++i) { + int id = mImmediateIds[i]; + Tag& tag = mTags[id]; + tag.hitCount = + std::clamp(tag.hitCount + mTagIncrementWeight, 0, mMaxHitCount); + tag.id = id; + tag.imageCenter = + std::reduce(mImmediateCorners[i].begin(), mImmediateCorners[i].end()) / + static_cast(mImmediateCorners[i].size()); + tag.tagInCam = getTagInCamFromPixel(msg, std::lround(tag.imageCenter.x), + std::lround(tag.imageCenter.y)); + + if (tag.tagInCam) { + // Publish tag to immediate + std::string immediateFrameId = + "immediateFiducial" + std::to_string(tag.id); + SE3::pushToTfTree(mTfBroadcaster, immediateFrameId, mCameraFrameId, + tag.tagInCam.value()); + } + } + + // Handle tags that were not seen this update + // Decrement their hit count and remove if they hit zero + auto it = mTags.begin(); + while (it != mTags.end()) { + auto& [id, tag] = *it; + if (std::ranges::find(mImmediateIds, id) == mImmediateIds.end()) { + tag.hitCount -= mTagDecrementWeight; + tag.tagInCam = std::nullopt; + if (tag.hitCount <= 0) { + it = mTags.erase(it); + continue; + } + } + ++it; + } + + // Publish all tags to 
the tf tree that have been seen enough times + for (auto const& [id, tag]: mTags) { + if (tag.hitCount >= mMinHitCountBeforePublish && tag.tagInCam) { + try { + std::string immediateFrameId = + "immediateFiducial" + std::to_string(tag.id); + // Publish tag to odom + std::string const& parentFrameId = + mUseOdom ? mOdomFrameId : mMapFrameId; + SE3 tagInParent = + SE3::fromTfTree(mTfBuffer, parentFrameId, immediateFrameId); + SE3::pushToTfTree(mTfBroadcaster, "fiducial" + std::to_string(id), + parentFrameId, tagInParent); + } catch (tf2::ExtrapolationException const&) { + NODELET_WARN("Old data for immediate tag"); + } catch (tf2::LookupException const&) { + NODELET_WARN("Expected transform for immediate tag"); + } catch (tf2::ConnectivityException const&) { + NODELET_WARN( + "Expected connection to odom frame. Is visual odometry running?"); + } + } + } + + if (mPublishImages && mImgPub.getNumSubscribers()) { + + cv::aruco::drawDetectedMarkers(mImg, mImmediateCorners, mImmediateIds); + // Max number of tags the hit counter can display = 10; + if (!mTags.empty()) { + // TODO: remove some magic numbers in this block + int tagCount = 1; + auto tagBoxWidth = static_cast(mImg.cols / (mTags.size() * 2)); + for (auto& [id, tag]: mTags) { + cv::Scalar color{255, 0, 0}; + cv::Point pt{tagBoxWidth * tagCount, mImg.rows / 10}; + std::string text = + "id" + std::to_string(id) + ":" + std::to_string(tag.hitCount); + cv::putText(mImg, text, pt, cv::FONT_HERSHEY_COMPLEX, mImg.cols / 800.0, + color, mImg.cols / 300); + + ++tagCount; + } + } + mImgMsg.header.seq = mSeqNum; + mImgMsg.header.stamp = ros::Time::now(); + mImgMsg.header.frame_id = "zed2i_left_camera_frame"; + mImgMsg.height = mImg.rows; + mImgMsg.width = mImg.cols; + mImgMsg.encoding = sensor_msgs::image_encodings::BGR8; + mImgMsg.step = mImg.step; + mImgMsg.is_bigendian = __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; + size_t size = mImgMsg.step * mImgMsg.height; + mImgMsg.data.resize(size); + 
std::uninitialized_copy(std::execution::par_unseq, mImg.data, + mImg.data + size, mImgMsg.data.begin()); + mImgPub.publish(mImgMsg); + } + + size_t detectedCount = mImmediateIds.size(); + NODELET_INFO_COND(!mPrevDetectedCount.has_value() || + detectedCount != mPrevDetectedCount.value(), + "Detected %zu markers", detectedCount); + mPrevDetectedCount = detectedCount; + + mProfiler.measureEvent("Publish"); + + mSeqNum++; } - mImgMsg.header.seq = mSeqNum; - mImgMsg.header.stamp = ros::Time::now(); - mImgMsg.header.frame_id = "zed2i_left_camera_frame"; - mImgMsg.height = mImg.rows; - mImgMsg.width = mImg.cols; - mImgMsg.encoding = sensor_msgs::image_encodings::BGR8; - mImgMsg.step = mImg.step; - mImgMsg.is_bigendian = __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; - size_t size = mImgMsg.step * mImgMsg.height; - mImgMsg.data.resize(size); - std::uninitialized_copy(std::execution::par_unseq, mImg.data, - mImg.data + size, mImgMsg.data.begin()); - mImgPub.publish(mImgMsg); - } - - size_t detectedCount = mImmediateIds.size(); - NODELET_INFO_COND(!mPrevDetectedCount.has_value() || - detectedCount != mPrevDetectedCount.value(), - "Detected %zu markers", detectedCount); - mPrevDetectedCount = detectedCount; - - mProfiler.measureEvent("Publish"); - - mSeqNum++; -} } // namespace mrover From b9d69574904cb1957bb343208993a5294e9826a9 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Mon, 29 Jan 2024 21:28:22 -0500 Subject: [PATCH 050/126] remove mains from percep nodelets --- src/perception/long_range_cam/long_range_cam.cpp | 13 ------------- .../long_range_cam/long_range_tag_detector.cpp | 12 ------------ 2 files changed, 25 deletions(-) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index 16388d0e3..783ffe52d 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -82,19 +82,6 @@ namespace mrover { } // namespace mrover -int main(int argc, char** argv) { 
- ros::init(argc, argv, "usb_camera"); - - // Start the ZED Nodelet - nodelet::Loader nodelet; - std::cout << ros::this_node::getName() << std::endl; - nodelet.load(ros::this_node::getName(), "mrover/LongRangeCamNodelet", ros::names::getRemappings(), {}); - - ros::spin(); - - return EXIT_SUCCESS; -} - #ifdef MROVER_IS_NODELET #include PLUGINLIB_EXPORT_CLASS(mrover::LongRangeCamNodelet, nodelet::Nodelet) diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index e85126d8a..45f72b9ed 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -143,17 +143,5 @@ namespace mrover { } // namespace mrover -int main(int argc, char** argv) { - ros::init(argc, argv, "long_range_tag_detector"); - - // Start the long range cam Nodelet - nodelet::Loader nodelet; - nodelet.load(ros::this_node::getName(), "mrover/LongRangeTagDetectorNodelet", ros::names::getRemappings(), {}); - - ros::spin(); - - return EXIT_SUCCESS; -} - #include PLUGINLIB_EXPORT_CLASS(mrover::LongRangeTagDetectorNodelet, nodelet::Nodelet) From 34818a966550f12492042d43a48df7382f17bc42 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Tue, 30 Jan 2024 15:14:58 -0500 Subject: [PATCH 051/126] nav includes --- src/navigation/long_range.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/navigation/long_range.py b/src/navigation/long_range.py index 500511517..b1e132dfb 100644 --- a/src/navigation/long_range.py +++ b/src/navigation/long_range.py @@ -15,8 +15,8 @@ from util.state_lib.state import State from abc import abstractmethod import state -from state import ApproachTargetBaseState -from state import approach_post +from navigation.approach_target_base import ApproachTargetBaseState +import approach_post import numpy as np import math From 92379ab34a472131053b58940ab436aa2f193d39 Mon 
Sep 17 00:00:00 2001 From: umroverPerception Date: Tue, 30 Jan 2024 16:11:12 -0500 Subject: [PATCH 052/126] add transtions in navigation.py --- .catkin_tools/CATKIN_IGNORE | 0 .catkin_tools/README | 13 ----------- .catkin_tools/VERSION | 1 - .catkin_tools/profiles/default/build.yaml | 22 ------------------- .../catkin_tools_prebuild/package.xml | 10 --------- config/simulator/simulator.yaml | 18 +++++++-------- scripts/debug_course_publisher.py | 2 +- src/navigation/context.py | 3 +++ src/navigation/long_range.py | 18 ++++----------- src/navigation/navigation.py | 15 +++++++++++-- 10 files changed, 30 insertions(+), 72 deletions(-) delete mode 100644 .catkin_tools/CATKIN_IGNORE delete mode 100644 .catkin_tools/README delete mode 100644 .catkin_tools/VERSION delete mode 100644 .catkin_tools/profiles/default/build.yaml delete mode 100644 .catkin_tools/profiles/default/packages/catkin_tools_prebuild/package.xml diff --git a/.catkin_tools/CATKIN_IGNORE b/.catkin_tools/CATKIN_IGNORE deleted file mode 100644 index e69de29bb..000000000 diff --git a/.catkin_tools/README b/.catkin_tools/README deleted file mode 100644 index 4706f4756..000000000 --- a/.catkin_tools/README +++ /dev/null @@ -1,13 +0,0 @@ -# Catkin Tools Metadata - -This directory was generated by catkin_tools and it contains persistent -configuration information used by the `catkin` command and its sub-commands. - -Each subdirectory of the `profiles` directory contains a set of persistent -configuration options for separate profiles. The default profile is called -`default`. If another profile is desired, it can be described in the -`profiles.yaml` file in this directory. - -Please see the catkin_tools documentation before editing any files in this -directory. Most actions can be performed with the `catkin` command-line -program. 
diff --git a/.catkin_tools/VERSION b/.catkin_tools/VERSION deleted file mode 100644 index f76f91317..000000000 --- a/.catkin_tools/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.9.2 \ No newline at end of file diff --git a/.catkin_tools/profiles/default/build.yaml b/.catkin_tools/profiles/default/build.yaml deleted file mode 100644 index 910ef4283..000000000 --- a/.catkin_tools/profiles/default/build.yaml +++ /dev/null @@ -1,22 +0,0 @@ -authors: [] -blacklist: [] -build_space: build -catkin_make_args: [] -cmake_args: [] -devel_layout: linked -devel_space: devel -extend_path: null -extends: null -install: false -install_space: install -isolate_install: false -jobs_args: [] -licenses: -- TODO -log_space: logs -maintainers: [] -make_args: [] -source_space: src -use_env_cache: false -use_internal_make_jobserver: true -whitelist: [] diff --git a/.catkin_tools/profiles/default/packages/catkin_tools_prebuild/package.xml b/.catkin_tools/profiles/default/packages/catkin_tools_prebuild/package.xml deleted file mode 100644 index 134c59ade..000000000 --- a/.catkin_tools/profiles/default/packages/catkin_tools_prebuild/package.xml +++ /dev/null @@ -1,10 +0,0 @@ - - catkin_tools_prebuild - - This package is used to generate catkin setup files. 
- - 0.0.0 - BSD - jbohren - catkin - diff --git a/config/simulator/simulator.yaml b/config/simulator/simulator.yaml index acd6866b2..910c97198 100644 --- a/config/simulator/simulator.yaml +++ b/config/simulator/simulator.yaml @@ -2,39 +2,39 @@ objects: - type: urdf name: rover uri: package://mrover/urdf/rover/rover.urdf.xacro - translation: [ 0, 0, 0.5 ] + translation: [0, 0, 0.5] - type: urdf name: world uri: package://mrover/urdf/world/world.urdf.xacro - type: urdf name: tag_0 uri: package://mrover/urdf/world/tag_0.urdf.xacro - translation: [ -3, -3, 0.7 ] + translation: [5, 0, 0.7] - type: urdf name: tag_1 uri: package://mrover/urdf/world/tag_1.urdf.xacro - translation: [ 15, -14, 2.4 ] + translation: [15, -14, 2.4] - type: urdf name: hammer uri: package://mrover/urdf/world/hammer.urdf.xacro - translation: [ 8, 7, 0.6 ] + translation: [8, 7, 0.6] - type: urdf name: bottle uri: package://mrover/urdf/world/bottle.urdf.xacro - translation: [ -6.25, 10.5, 0.3 ] + translation: [-6.25, 10.5, 0.3] - type: urdf name: rock1 uri: package://mrover/urdf/world/rock.urdf.xacro - translation: [ 2, -2, 0.1 ] + translation: [2, -2, 0.1] - type: urdf name: rock2 uri: package://mrover/urdf/world/rock.urdf.xacro - translation: [ 7, -6, 0.2 ] + translation: [7, -6, 0.2] - type: urdf name: rock3 uri: package://mrover/urdf/world/rock.urdf.xacro - translation: [ 8, -10, 1 ] + translation: [8, -10, 1] - type: urdf name: rock4 uri: package://mrover/urdf/world/rock.urdf.xacro - translation: [ 9, -2, 0.2 ] \ No newline at end of file + translation: [9, -2, 0.2] diff --git a/scripts/debug_course_publisher.py b/scripts/debug_course_publisher.py index 3bfe991bc..5f1468ba0 100755 --- a/scripts/debug_course_publisher.py +++ b/scripts/debug_course_publisher.py @@ -26,7 +26,7 @@ # ) ( Waypoint(tag_id=0, type=WaypointType(val=WaypointType.POST)), - SE3(position=np.array([-2, -2, 0])), + SE3(position=np.array([3, 0, 0])), ) # ( # Waypoint(tag_id=1, type=WaypointType(val=WaypointType.POST)), diff 
--git a/src/navigation/context.py b/src/navigation/context.py index fa68db9c5..bc3fc19bf 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -151,6 +151,9 @@ def push_frame(self, tags: List[LongRangeTag]) -> None: self.__data[tag.id] = self.TagData(hit_count=1, tag=tag, time=rospy.get_time()) def get(self, tag_id: int) -> Optional[LongRangeTag]: + if len(self.__data) == 0: + return None + time_difference = rospy.get_time() - self.__data[tag_id].time if ( tag_id in self.__data diff --git a/src/navigation/long_range.py b/src/navigation/long_range.py index b1e132dfb..2f13bc4b0 100644 --- a/src/navigation/long_range.py +++ b/src/navigation/long_range.py @@ -1,24 +1,14 @@ import tf2_ros import rospy -from aenum import Enum, NoAlias -from geometry_msgs.msg import Twist + from util.ros_utils import get_rosparam -from context import Context from util.state_lib.state import State -from util.state_lib.state_machine import StateMachine from util.np_utils import normalized -from abc import ABC, abstractmethod -from state import DoneState -from search import SearchState -from util.state_lib.state_publisher_server import StatePublisher from typing import Optional -from util.state_lib.state import State -from abc import abstractmethod -import state + +from navigation import approach_post from navigation.approach_target_base import ApproachTargetBaseState -import approach_post import numpy as np -import math DIST_AHEAD = get_rosparam("long_range/distance_ahead", 20) @@ -68,6 +58,6 @@ def get_target_pos(self, context) -> Optional[int]: def determine_next(self, context, finished: bool) -> State: fid_pos = context.env.current_fid_pos() if fid_pos is None: - return state.LongRangeState() + return self else: return approach_post.ApproachPostState() diff --git a/src/navigation/navigation.py b/src/navigation/navigation.py index 300d87c08..056ac5cfc 100755 --- a/src/navigation/navigation.py +++ b/src/navigation/navigation.py @@ -15,6 +15,7 @@ from navigation.state 
import DoneState, OffState, off_check from navigation.waypoint import WaypointState from navigation.approach_object import ApproachObjectState +from navigation.long_range import LongRangeState class Navigation(threading.Thread): @@ -32,10 +33,18 @@ def __init__(self, context: Context): self.state_machine.add_transitions(PostBackupState(), [WaypointState(), RecoveryState()]) self.state_machine.add_transitions( RecoveryState(), - [WaypointState(), SearchState(), PostBackupState(), ApproachPostState(), ApproachObjectState()], + [ + WaypointState(), + SearchState(), + PostBackupState(), + ApproachPostState(), + ApproachObjectState(), + LongRangeState(), + ], ) self.state_machine.add_transitions( - SearchState(), [ApproachPostState(), ApproachObjectState(), WaypointState(), RecoveryState()] + SearchState(), + [ApproachPostState(), ApproachObjectState(), LongRangeState(), WaypointState(), RecoveryState()], ) self.state_machine.add_transitions(DoneState(), [WaypointState()]) self.state_machine.add_transitions( @@ -44,12 +53,14 @@ def __init__(self, context: Context): PostBackupState(), ApproachPostState(), ApproachObjectState(), + LongRangeState(), SearchState(), RecoveryState(), DoneState(), ], ) self.state_machine.add_transitions(ApproachObjectState(), [DoneState(), SearchState()]) + self.state_machine.add_transitions(LongRangeState(), [ApproachPostState()]) self.state_machine.add_transitions(OffState(), [WaypointState(), DoneState()]) self.state_machine.configure_off_switch(OffState(), off_check) self.state_machine_server = StatePublisher(self.state_machine, "nav_structure", 1, "nav_state", 10) From dcae199e00e4e8413d9948209f5a0d496414116f Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Tue, 30 Jan 2024 16:16:42 -0500 Subject: [PATCH 053/126] fix image input to work in sim --- .../long_range_cam/long_range_tag_detector.cpp | 2 +- .../long_range_tag_detector.processing.cpp | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git 
a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index 45f72b9ed..ce825d003 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -23,7 +23,7 @@ namespace mrover { // topic: long_range_cam/image //todo - mImgSub = mNh.subscribe("long_range_cam/image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); + mImgSub = mNh.subscribe("long_range_image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); mDictionary = cv::makePtr(cv::aruco::getPredefinedDictionary(dictionaryNumber)); mServiceEnableDetections = mNh.advertiseService("enable_detections", &LongRangeTagDetectorNodelet::enableDetectionsCallback, this); diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index e54a78150..6f76b94cc 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -3,6 +3,8 @@ #include #include +#include + namespace mrover { /** @@ -25,7 +27,7 @@ namespace mrover { // 3. 
We only want to publish the tags if the topic has subscribers if (mPublishImages && mImgPub.getNumSubscribers()) { // Draw the tags on the image using OpenCV - //publishTagsOnImage(); + publishTagsOnImage(); } //Publish all tags that meet threshold @@ -40,8 +42,11 @@ namespace mrover { assert(msg); assert(msg->height > 0); assert(msg->width > 0); - //Store image message to mImgMsg member variable - cv::Mat cvImage = cv::Mat{static_cast(msg->height), static_cast(msg->width), CV_8UC3, const_cast(msg->data.data())}; + + // TODO: Modify when going back to real cam + cv::Mat cvImage_C4 = cv::Mat{static_cast(msg->height), static_cast(msg->width), CV_8UC4, const_cast(msg->data.data())}; + cv::Mat cvImage = cv::Mat(); + cv::cvtColor(cvImage_C4, cvImage, cv::COLOR_BGRA2BGR); //Store to mImg member variable - unsure if necessary but need it to persist cvImage.copyTo(mImg); From 31f0702cc75ab1be764f9ac007e58156bed79f5b Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Tue, 30 Jan 2024 19:55:12 -0500 Subject: [PATCH 054/126] testing long range --- config/navigation.yaml | 2 + launch/perception.launch | 4 +- scripts/debug_course_publisher.py | 2 +- src/navigation/context.py | 31 ++-- src/navigation/long_range.py | 5 +- src/navigation/navigation.py | 2 +- urdf/rover/rover.urdf.xacro | 240 +++++++++++++++--------------- 7 files changed, 150 insertions(+), 136 deletions(-) diff --git a/config/navigation.yaml b/config/navigation.yaml index 153a40dc7..96ba22aa2 100644 --- a/config/navigation.yaml +++ b/config/navigation.yaml @@ -52,3 +52,5 @@ watchdog: long_range: distance_ahead: 20 time_threshold: 5 + increment_weight: 100 + decrement_weight: 1 diff --git a/launch/perception.launch b/launch/perception.launch index 67f0e5aeb..76141dc77 100644 --- a/launch/perception.launch +++ b/launch/perception.launch @@ -2,9 +2,9 @@ - + args="load mrover/TagDetectorNodelet nodelet_manager" output="screen" /> None: + def __init__(self, ctx: Context, min_hits: int, max_hits: int = 100) -> 
None: self.ctx = ctx self.__data = {} self.min_hits = min_hits self.max_hits = max_hits def push_frame(self, tags: List[LongRangeTag]) -> None: - for _, cur_tag in self.__data.items(): - if cur_tag.tag.id not in tags: - cur_tag.hit_count -= 1 + for _, cur_tag in list(self.__data.items()): + tags_ids = [tag.id for tag in tags] + if cur_tag.tag.id not in tags_ids: + cur_tag.hit_count -= DECREMENT_WEIGHT + print(f"dec {cur_tag.tag.id} to {cur_tag.hit_count}") if cur_tag.hit_count <= 0: del self.__data[cur_tag.tag.id] + print("deleted from dict") else: - cur_tag.hit_count += 1 - cur_tag.hit_count = min(cur_tag.hit_count, self.max_hits) + cur_tag.hit_count += INCREMENT_WEIGHT + if cur_tag.hit_count > self.max_hits: + cur_tag.hit_count = self.max_hits + print("TAG ID", cur_tag.tag.id, "HIT COUNT:", cur_tag.hit_count) for tag in tags: if tag.id not in self.__data: - self.__data[tag.id] = self.TagData(hit_count=1, tag=tag, time=rospy.get_time()) + self.__data[tag.id] = self.TagData(hit_count=INCREMENT_WEIGHT, tag=tag, time=rospy.get_time()) def get(self, tag_id: int) -> Optional[LongRangeTag]: if len(self.__data) == 0: return None - + if tag_id not in self.__data: + print(f"{tag_id} not seen.") + return None time_difference = rospy.get_time() - self.__data[tag_id].time + print(f"tag id hc: {self.__data[tag_id].hit_count}") if ( tag_id in self.__data and self.__data[tag_id].hit_count >= self.min_hits @@ -275,7 +286,7 @@ class Context: vis_publisher: rospy.Publisher course_listener: rospy.Subscriber stuck_listener: rospy.Subscriber - # tag_data_listener: rospy.Subscriber + tag_data_listener: rospy.Subscriber # Use these as the primary interfaces in states course: Optional[Course] @@ -300,7 +311,7 @@ def __init__(self): self.course = None self.rover = Rover(self, False, "") # TODO update min_hits to be a param - self.env = Environment(self, long_range_tags=LongRangeTagStore(self, 3)) + self.env = Environment(self, long_range_tags=LongRangeTagStore(self, 50)) 
self.disable_requested = False self.use_odom = rospy.get_param("use_odom_frame") self.world_frame = rospy.get_param("world_frame") diff --git a/src/navigation/long_range.py b/src/navigation/long_range.py index 2f13bc4b0..9c7184ae5 100644 --- a/src/navigation/long_range.py +++ b/src/navigation/long_range.py @@ -30,7 +30,7 @@ def on_exit(self, context): pass def get_target_pos(self, context) -> Optional[int]: - tag_id = context.course.get_current_waypoint().tag_id + tag_id = context.course.current_waypoint().tag_id tag = context.env.long_range_tags.get(tag_id) if tag is None: return None @@ -52,11 +52,12 @@ def get_target_pos(self, context) -> Optional[int]: direction_to_tag = normalized(direction_to_tag) distance = DIST_AHEAD + direction_to_tag = np.array([direction_to_tag[0], direction_to_tag[1], 0.0]) tag_position = rover_position + direction_to_tag * distance return tag_position def determine_next(self, context, finished: bool) -> State: - fid_pos = context.env.current_fid_pos() + fid_pos = context.env.current_target_pos() if fid_pos is None: return self else: diff --git a/src/navigation/navigation.py b/src/navigation/navigation.py index 056ac5cfc..44d4db391 100755 --- a/src/navigation/navigation.py +++ b/src/navigation/navigation.py @@ -60,7 +60,7 @@ def __init__(self, context: Context): ], ) self.state_machine.add_transitions(ApproachObjectState(), [DoneState(), SearchState()]) - self.state_machine.add_transitions(LongRangeState(), [ApproachPostState()]) + self.state_machine.add_transitions(LongRangeState(), [ApproachPostState(), SearchState()]) self.state_machine.add_transitions(OffState(), [WaypointState(), DoneState()]) self.state_machine.configure_off_switch(OffState(), off_check) self.state_machine_server = StatePublisher(self.state_machine, "nav_structure", 1, "nav_state", 10) diff --git a/urdf/rover/rover.urdf.xacro b/urdf/rover/rover.urdf.xacro index 7869e4836..85db94adf 100644 --- a/urdf/rover/rover.urdf.xacro +++ b/urdf/rover/rover.urdf.xacro @@ -3,43 
+3,43 @@ - - - - - - - - + + + + + + + + - + - + + iyy="${m*(3*r*r+h*h)/12}" iyz="0" + izz="${m*r*r/2}" /> - + + iyy="${m / 12.0 * (d*d + h*h)}" iyz="0" + izz="${m / 12.0 * (d*d + w*w)}" /> - + + iyy="${2.0*m*(r*r)/5.0}" iyz="0" + izz="${2.0*m*(r*r)/5.0}" /> @@ -47,16 +47,16 @@ - + - + - + - + @@ -64,229 +64,229 @@ - - - - - + + + + + - + - + - + - - - + + + - + - + - + - + - + - + - + - + - + - + - + - + - - - - - + + + + + - - - - - + + + + + - - - - - + + + + + - - - - - + + + + + - - - - - + + + + + - + - + - + - + - + - + - + - - - + + + - - - - - + + + + + - - - - - + + + + + + x="0.034346" y="0" z="0.049024" + lower="${-TAU / 8}" upper="0" /> + x="0.534365" y="0" z="0.009056" + lower="-0.959931" upper="2.87979" /> + x="0.546033" y="0" z="0.088594" + lower="-2.35619" upper="2.35619" /> + x="0.044886" y="0" z="0" + lower="-2.35619" upper="2.35619" /> - - + + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + - - - + + + - + From 9be5063329d9d4341453d8439b15f8d983e07eff Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Wed, 31 Jan 2024 21:18:35 -0500 Subject: [PATCH 055/126] update time and add transitions to approach post --- config/navigation.yaml | 2 +- scripts/debug_course_publisher.py | 10 +++---- src/navigation/approach_post.py | 47 ------------------------------- src/navigation/context.py | 18 ++++++------ src/navigation/search.py | 4 +-- src/navigation/waypoint.py | 4 +-- 6 files changed, 20 insertions(+), 65 deletions(-) diff --git a/config/navigation.yaml b/config/navigation.yaml index 96ba22aa2..cc54f3043 100644 --- a/config/navigation.yaml +++ b/config/navigation.yaml @@ -52,5 +52,5 @@ watchdog: long_range: distance_ahead: 20 time_threshold: 5 - increment_weight: 100 + increment_weight: 5 decrement_weight: 1 diff --git a/scripts/debug_course_publisher.py b/scripts/debug_course_publisher.py index 6afda25dc..248af2777 100755 --- a/scripts/debug_course_publisher.py +++ b/scripts/debug_course_publisher.py @@ -27,11 +27,11 @@ ( 
Waypoint(tag_id=0, type=WaypointType(val=WaypointType.POST)), SE3(position=np.array([5, 0, 0])), - ) - # ( - # Waypoint(tag_id=1, type=WaypointType(val=WaypointType.POST)), - # SE3(position=np.array([11, -10, 0])), - # ) + ), + ( + Waypoint(tag_id=1, type=WaypointType(val=WaypointType.POST)), + SE3(position=np.array([11, -10, 0])), + ), ] ] ) diff --git a/src/navigation/approach_post.py b/src/navigation/approach_post.py index 5eaae0bc8..53c2acfe3 100644 --- a/src/navigation/approach_post.py +++ b/src/navigation/approach_post.py @@ -32,50 +32,3 @@ def determine_next(self, context, finished: bool) -> State: return state.DoneState() else: return self - - -# class ApproachPostState(State): -# STOP_THRESH = get_rosparam("single_fiducial/stop_thresh", 0.7) -# FIDUCIAL_STOP_THRESHOLD = get_rosparam("single_fiducial/fiducial_stop_threshold", 1.75) -# DRIVE_FWD_THRESH = get_rosparam("waypoint/drive_fwd_thresh", 0.34) # 20 degrees - -# def on_enter(self, context) -> None: -# pass - -# def on_exit(self, context) -> None: -# pass - -# def on_loop(self, context) -> State: -# """ -# Drive towards a single fiducial if we see it and stop within a certain threshold if we see it. -# Else conduct a search to find it. 
-# :param ud: State machine user data -# :return: Next state -# """ -# fid_pos = context.env.current_target_pos() -# if fid_pos is None: -# return search.SearchState() - -# try: -# cmd_vel, arrived = context.rover.driver.get_drive_command( -# fid_pos, -# context.rover.get_pose(in_odom_frame=True), -# self.STOP_THRESH, -# self.DRIVE_FWD_THRESH, -# in_odom=context.use_odom, -# ) -# if arrived: -# context.env.arrived_at_post = True -# context.env.last_post_location = context.env.current_target_pos(odom_override=False) -# context.course.increment_waypoint() -# return waypoint.WaypointState() # we want to go to done state now -# context.rover.send_drive_command(cmd_vel) -# except ( -# tf2_ros.LookupException, -# tf2_ros.ConnectivityException, -# tf2_ros.ExtrapolationException, -# ): -# # TODO: probably go into some waiting state (old todo) -# pass - -# return self diff --git a/src/navigation/context.py b/src/navigation/context.py index 016288834..7b6e90611 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -28,7 +28,7 @@ TAG_EXPIRATION_TIME_SECONDS = 60 TIME_THRESHOLD = get_rosparam("long_range/time_threshold", 5) -INCREMENT_WEIGHT = get_rosparam("long_range/increment_weight", 100) +INCREMENT_WEIGHT = get_rosparam("long_range/increment_weight", 5) DECREMENT_WEIGHT = get_rosparam("long_range/decrement_weight", 1) REF_LAT = rospy.get_param("gps_linearization/reference_point_latitude") @@ -133,7 +133,7 @@ class TagData: min_hits: int max_hits: int - def __init__(self, ctx: Context, min_hits: int, max_hits: int = 100) -> None: + def __init__(self, ctx: Context, min_hits: int, max_hits: int = 10) -> None: self.ctx = ctx self.__data = {} self.min_hits = min_hits @@ -144,28 +144,30 @@ def push_frame(self, tags: List[LongRangeTag]) -> None: tags_ids = [tag.id for tag in tags] if cur_tag.tag.id not in tags_ids: cur_tag.hit_count -= DECREMENT_WEIGHT - print(f"dec {cur_tag.tag.id} to {cur_tag.hit_count}") + # print(f"DEC {cur_tag.tag.id} to 
{cur_tag.hit_count}") if cur_tag.hit_count <= 0: del self.__data[cur_tag.tag.id] - print("deleted from dict") + # print("DELETED from dict") else: cur_tag.hit_count += INCREMENT_WEIGHT + cur_tag.time = rospy.get_time() if cur_tag.hit_count > self.max_hits: cur_tag.hit_count = self.max_hits - print("TAG ID", cur_tag.tag.id, "HIT COUNT:", cur_tag.hit_count) + # print(f"INC {cur_tag.tag.id} to {cur_tag.hit_count}") for tag in tags: if tag.id not in self.__data: + # print(f"INC {tag.id} to {INCREMENT_WEIGHT}") self.__data[tag.id] = self.TagData(hit_count=INCREMENT_WEIGHT, tag=tag, time=rospy.get_time()) def get(self, tag_id: int) -> Optional[LongRangeTag]: if len(self.__data) == 0: return None if tag_id not in self.__data: - print(f"{tag_id} not seen.") + # print(f"{tag_id} not seen.") return None time_difference = rospy.get_time() - self.__data[tag_id].time - print(f"tag id hc: {self.__data[tag_id].hit_count}") + # print(f"HIT COUNT: {self.__data[tag_id].hit_count}") if ( tag_id in self.__data and self.__data[tag_id].hit_count >= self.min_hits @@ -311,7 +313,7 @@ def __init__(self): self.course = None self.rover = Rover(self, False, "") # TODO update min_hits to be a param - self.env = Environment(self, long_range_tags=LongRangeTagStore(self, 50)) + self.env = Environment(self, long_range_tags=LongRangeTagStore(self, 3)) self.disable_requested = False self.use_odom = rospy.get_param("use_odom_frame") self.world_frame = rospy.get_param("world_frame") diff --git a/src/navigation/search.py b/src/navigation/search.py index 3db5ede04..18db30adc 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -148,8 +148,8 @@ def on_loop(self, context) -> State: current_waypoint = context.course.current_waypoint() if current_waypoint.type.val == WaypointType.POST: # if we see the tag in the ZED, go to ApproachPostState - # if context.env.current_target_pos() is not None and context.course.look_for_post(): - # return approach_post.ApproachPostState() + if 
context.env.current_target_pos() is not None and context.course.look_for_post(): + return approach_post.ApproachPostState() # if we see the tag in the long range camera, go to LongRangeState if context.env.long_range_tags.get(current_waypoint.tag_id) is not None: return long_range.LongRangeState() diff --git a/src/navigation/waypoint.py b/src/navigation/waypoint.py index 62181a5c6..723f90cd3 100644 --- a/src/navigation/waypoint.py +++ b/src/navigation/waypoint.py @@ -35,8 +35,8 @@ def on_loop(self, context) -> State: return post_backup.PostBackupState() if context.course.look_for_post(): - # if context.env.current_target_pos() is not None: - # return approach_post.ApproachPostState() + if context.env.current_target_pos() is not None: + return approach_post.ApproachPostState() # if we see the tag in the long range camera, go to LongRangeState if context.env.long_range_tags.get(current_waypoint.tag_id) is not None: return long_range.LongRangeState() From cec93d69f4059c039e00d83bfc163d7f91bf9733 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Sat, 3 Feb 2024 17:28:25 -0500 Subject: [PATCH 056/126] remove prints and add min_hits and max_hits to navigation.yaml --- config/navigation.yaml | 2 ++ src/navigation/context.py | 25 ++++++++++--------------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/config/navigation.yaml b/config/navigation.yaml index cc54f3043..c7a9904e0 100644 --- a/config/navigation.yaml +++ b/config/navigation.yaml @@ -54,3 +54,5 @@ long_range: time_threshold: 5 increment_weight: 5 decrement_weight: 1 + min_hits: 3 + max_hits: 10 diff --git a/src/navigation/context.py b/src/navigation/context.py index 7b6e90611..69cdf42ee 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -30,6 +30,8 @@ TIME_THRESHOLD = get_rosparam("long_range/time_threshold", 5) INCREMENT_WEIGHT = get_rosparam("long_range/increment_weight", 5) DECREMENT_WEIGHT = get_rosparam("long_range/decrement_weight", 1) +MIN_HITS = 
get_rosparam("long_range/min_hits", 3) +MAX_HITS = get_rosparam("long_range/max_hits", 10) REF_LAT = rospy.get_param("gps_linearization/reference_point_latitude") REF_LON = rospy.get_param("gps_linearization/reference_point_longitude") @@ -79,16 +81,17 @@ class Environment: def get_target_pos(self, id: str, in_odom_frame: bool = True) -> Optional[np.ndarray]: """ - Retrieves the pose of the given fiducial ID from the TF tree in the odom frame - if in_odom_frame is True otherwise in the world frame - if it exists and is more recent than TAG_EXPIRATION_TIME_SECONDS, otherwise returns None + Retrieves the pose of the given target from the TF tree in the odom frame (if in_odom_frame is True otherwise in + the world frame) if it exists and is more recent than TAG_EXPIRATION_TIME_SECONDS, otherwise returns None + :param id: id of what we want from the TF tree. Could be for a fiducial, the hammer, or the water bottle + :param in_odom_frame: bool for if we are in the odom fram or world frame + :return: pose of the target or None """ try: parent_frame = self.ctx.odom_frame if in_odom_frame else self.ctx.world_frame - target_pose, time = SE3.from_tf_time(self.ctx.tf_buffer, parent_frame=parent_frame, child_frame=f"{id}") + target_pose, time = SE3.from_tf_time(self.ctx.tf_buffer, parent_frame=parent_frame, child_frame=id) now = rospy.Time.now() if now.to_sec() - time.to_sec() >= TAG_EXPIRATION_TIME_SECONDS: - print(f"EXPIRED {id}!") return None except ( tf2_ros.LookupException, @@ -117,7 +120,6 @@ def current_target_pos(self, odom_override: bool = True) -> Optional[np.ndarray] elif current_waypoint.type == WaypointType.WATER_BOTTLE: return self.get_target_pos("bottle", in_odom) else: - # print("CURRENT WAYPOINT IS NOT A POST OR OBJECT") return None @@ -133,7 +135,7 @@ class TagData: min_hits: int max_hits: int - def __init__(self, ctx: Context, min_hits: int, max_hits: int = 10) -> None: + def __init__(self, ctx: Context, min_hits: int = MIN_HITS, max_hits: int = MAX_HITS) -> 
None: self.ctx = ctx self.__data = {} self.min_hits = min_hits @@ -144,30 +146,24 @@ def push_frame(self, tags: List[LongRangeTag]) -> None: tags_ids = [tag.id for tag in tags] if cur_tag.tag.id not in tags_ids: cur_tag.hit_count -= DECREMENT_WEIGHT - # print(f"DEC {cur_tag.tag.id} to {cur_tag.hit_count}") if cur_tag.hit_count <= 0: del self.__data[cur_tag.tag.id] - # print("DELETED from dict") else: cur_tag.hit_count += INCREMENT_WEIGHT cur_tag.time = rospy.get_time() if cur_tag.hit_count > self.max_hits: cur_tag.hit_count = self.max_hits - # print(f"INC {cur_tag.tag.id} to {cur_tag.hit_count}") for tag in tags: if tag.id not in self.__data: - # print(f"INC {tag.id} to {INCREMENT_WEIGHT}") self.__data[tag.id] = self.TagData(hit_count=INCREMENT_WEIGHT, tag=tag, time=rospy.get_time()) def get(self, tag_id: int) -> Optional[LongRangeTag]: if len(self.__data) == 0: return None if tag_id not in self.__data: - # print(f"{tag_id} not seen.") return None time_difference = rospy.get_time() - self.__data[tag_id].time - # print(f"HIT COUNT: {self.__data[tag_id].hit_count}") if ( tag_id in self.__data and self.__data[tag_id].hit_count >= self.min_hits @@ -312,8 +308,7 @@ def __init__(self): self.stuck_listener = rospy.Subscriber("nav_stuck", Bool, self.stuck_callback) self.course = None self.rover = Rover(self, False, "") - # TODO update min_hits to be a param - self.env = Environment(self, long_range_tags=LongRangeTagStore(self, 3)) + self.env = Environment(self, long_range_tags=LongRangeTagStore(self)) self.disable_requested = False self.use_odom = rospy.get_param("use_odom_frame") self.world_frame = rospy.get_param("world_frame") From ccffb4f38b519060c7ba510f51193a42e1c00d64 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Thu, 8 Feb 2024 19:20:39 -0500 Subject: [PATCH 057/126] fix topic name + finalize image format --- src/perception/long_range_cam/long_range_cam.cpp | 3 ++- .../tag_detector/long_range_cam/long_range_tag_detector.cpp | 2 -- 
.../long_range_cam/long_range_tag_detector.processing.cpp | 6 +----- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index 783ffe52d..4f9fc2db5 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -1,4 +1,5 @@ #include "long_range_cam.hpp" +#include #include #include #include @@ -44,7 +45,7 @@ namespace mrover { mNh = getMTNodeHandle(); mPnh = getMTPrivateNodeHandle(); mCamInfoPub = mNh.advertise("long_range_cam/camera_info", 1); - mImgPub = mNh.advertise("long_range_cam/image", 1); + mImgPub = mNh.advertise("long_range_image", 1); // While dtor not called while (ros::ok()) { cv::VideoCapture mCapture{std::format("v4l2src device=/dev/arducam ! videoconvert ! video/x-raw,width={},height={},format=I420,framerate={}/1 ! appsink", 1920, 1080, 5), cv::CAP_GSTREAMER}; diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index ce825d003..3ac6e929d 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -21,8 +21,6 @@ namespace mrover { mImgPub = mNh.advertise("long_range_tag_detection", 1); mLongRangeTagsPub = mNh.advertise("tags", 1); - // topic: long_range_cam/image - //todo mImgSub = mNh.subscribe("long_range_image", 1, &LongRangeTagDetectorNodelet::imageCallback, this); mDictionary = cv::makePtr(cv::aruco::getPredefinedDictionary(dictionaryNumber)); mServiceEnableDetections = mNh.advertiseService("enable_detections", &LongRangeTagDetectorNodelet::enableDetectionsCallback, this); diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index 6f76b94cc..9599b03bd 
100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -3,8 +3,6 @@ #include #include -#include - namespace mrover { /** @@ -44,9 +42,7 @@ namespace mrover { assert(msg->width > 0); // TODO: Modify when going back to real cam - cv::Mat cvImage_C4 = cv::Mat{static_cast(msg->height), static_cast(msg->width), CV_8UC4, const_cast(msg->data.data())}; - cv::Mat cvImage = cv::Mat(); - cv::cvtColor(cvImage_C4, cvImage, cv::COLOR_BGRA2BGR); + cv::Mat cvImage = cv::Mat{static_cast(msg->height), static_cast(msg->width), CV_8UC3, const_cast(msg->data.data())}; //Store to mImg member variable - unsure if necessary but need it to persist cvImage.copyTo(mImg); From 6020ab9183994c0422cf77ee85689eb696129cf1 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Sat, 10 Feb 2024 13:54:07 -0500 Subject: [PATCH 058/126] gstreamer shenanigans --- .../long_range_cam/long_range_cam.cpp | 49 +++++++++---------- .../long_range_tag_detector.cpp | 2 +- .../long_range_tag_detector.hpp | 1 - .../long_range_tag_detector.processing.cpp | 19 +++---- 4 files changed, 31 insertions(+), 40 deletions(-) diff --git a/src/perception/long_range_cam/long_range_cam.cpp b/src/perception/long_range_cam/long_range_cam.cpp index 4f9fc2db5..2047cdf14 100644 --- a/src/perception/long_range_cam/long_range_cam.cpp +++ b/src/perception/long_range_cam/long_range_cam.cpp @@ -34,8 +34,6 @@ namespace mrover { auto* imgPtr = bgra.ptr(0); size_t size = msg->step * msg->height; msg->data.resize(size); - std::cout << "data size " << msg->data.size() << std::endl; - std::cout << "size obj " << size << std::endl; memcpy(msg->data.data(), imgPtr, size); } @@ -47,33 +45,32 @@ namespace mrover { mCamInfoPub = mNh.advertise("long_range_cam/camera_info", 1); mImgPub = mNh.advertise("long_range_image", 1); // While dtor not called + cv::VideoCapture mCapture{std::format("v4l2src device=/dev/arducam ! 
h264parse ! nvh264dec ! nvvidconv ! video/x-raw,width={},height={},format=I420,framerate={}/1 ! appsink", 1920, 1080, 5), cv::CAP_GSTREAMER}; + if (!mCapture.isOpened()) { + throw std::runtime_error("Long range cam failed to open"); + } + cv::Mat frame; while (ros::ok()) { - cv::VideoCapture mCapture{std::format("v4l2src device=/dev/arducam ! videoconvert ! video/x-raw,width={},height={},format=I420,framerate={}/1 ! appsink", 1920, 1080, 5), cv::CAP_GSTREAMER}; - if (!mCapture.isOpened()) { - throw std::runtime_error("Long range cam failed to open"); + // while (true) { + mCapture.read(frame); + if (frame.empty()) { + break; } - cv::Mat frame; - while (true) { - mCapture.read(frame); - if (frame.empty()) { - break; - } - cv::imshow("Sender", frame); - if (mImgPub.getNumSubscribers()) { - auto imgMsg = boost::make_shared(); - cv::Mat bgra; - cv::cvtColor(frame, bgra, cv::COLOR_YUV2BGR_I420); - fillImageMessage(bgra, imgMsg); - imgMsg->header.frame_id = "long_range_cam_frame"; - imgMsg->header.stamp = ros::Time::now(); - imgMsg->header.seq = mGrabUpdateTick; - mImgPub.publish(imgMsg); - } - if (cv::waitKey(1) == 's') { - break; - } - ++mGrabUpdateTick; + if (mImgPub.getNumSubscribers()) { + auto imgMsg = boost::make_shared(); + cv::Mat bgra; + cv::cvtColor(frame, bgra, cv::COLOR_YUV2BGR_I420); + fillImageMessage(bgra, imgMsg); + imgMsg->header.frame_id = "long_range_cam_frame"; + imgMsg->header.stamp = ros::Time::now(); + imgMsg->header.seq = mGrabUpdateTick; + mImgPub.publish(imgMsg); } + // if (cv::waitKey(1) == 's') { + // break; + // } + ++mGrabUpdateTick; + // } } } diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp index 3ac6e929d..f125c075e 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.cpp @@ -9,7 +9,7 @@ namespace mrover { mPnh = 
getMTPrivateNodeHandle(); mDetectorParams = new cv::aruco::DetectorParameters(); auto defaultDetectorParams = cv::makePtr(); - int dictionaryNumber; + int dictionaryNumber = 0; mPnh.param("publish_images", mPublishImages, true); mPnh.param("min_hit_count_before_publish", mMinHitCountBeforePublish, 5); diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp index cb0b4bf3d..57c6aa3e5 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.hpp @@ -57,7 +57,6 @@ namespace mrover { dynamic_reconfigure::Server mConfigServer; dynamic_reconfigure::Server::CallbackType mCallbackType; LoopProfiler mProfiler{"Long Range Tag Detector"}; - std::unordered_map mThreshPubs; // Map from threshold scale to publisher ros::ServiceServer mServiceEnableDetections; bool mUseOdom{}; std::string mOdomFrameId, mMapFrameId, mCameraFrameId; diff --git a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp index 9599b03bd..29afafe38 100644 --- a/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp +++ b/src/perception/tag_detector/long_range_cam/long_range_tag_detector.processing.cpp @@ -31,7 +31,14 @@ namespace mrover { //Publish all tags that meet threshold publishPermanentTags(); + size_t detectedCount = mImmediateIds.size(); + NODELET_INFO_COND(!mPrevDetectedCount.has_value() || + detectedCount != mPrevDetectedCount.value(), + "Detected %zu markers", detectedCount); + mPrevDetectedCount = detectedCount; + //PUBLISH TAGS + ++mSeqNum; } //HELPER FUNCTIONS @@ -52,7 +59,6 @@ namespace mrover { } void LongRangeTagDetectorNodelet::runTagDetection() { - // std::cout << mDetectorParams->adaptiveThreshConstant << std::endl; cv::aruco::detectMarkers(mImg, 
mDictionary, mImmediateCorners, mImmediateIds, mDetectorParams); } @@ -61,8 +67,6 @@ namespace mrover { //loop through all identified IDs for (size_t i = 0; i < mImmediateIds.size(); i++) { updateNewlyIdentifiedTags(i); - cv::Point2f center = getTagCenterPixels(mImmediateCorners[i]); - // std::cout << "bearing: " << getTagBearing(center) << "!!!" << std::endl; } //Now decrement all the hitcounts for tags that were not updated @@ -98,8 +102,6 @@ namespace mrover { lrt.updated = true; lrt.imageCenter = getNormedTagCenterOffset(tagCorners); - std::cout << "lrt image center " << lrt.imageCenter.x << std::endl; - return lrt; } @@ -158,10 +160,7 @@ namespace mrover { float LongRangeTagDetectorNodelet::getTagBearing(cv::Point2f& tagCenter) const { //for HD720 resolution - auto imageWidth = (float) mImgMsg.width; - std::cout << "width: " << imageWidth << " tag center x: " << tagCenter.x << std::endl; float bearing = -1 * ((float) tagCenter.x + 0.5) * mLongRangeFov; - std::cout << "bearing: " << bearing << std::endl; return bearing; } @@ -188,10 +187,6 @@ namespace mrover { } void LongRangeTagDetectorNodelet::publishTagsOnImage() { - // cv::Mat markedImage; - // markedImage.copyTo(mImg); - // std::cout << markedImage.total() << ", " << markedImage.channels() << std::endl; - cv::aruco::drawDetectedMarkers(mImg, mImmediateCorners, mImmediateIds); mImgMsg.header.seq = mSeqNum; mImgMsg.header.stamp = ros::Time::now(); From 2a7da8bf636defa90b3e9704092c60841b84076c Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Sat, 10 Feb 2024 17:18:56 -0500 Subject: [PATCH 059/126] fix type hints, add object spiral params, transitions to recovery, abstract check approach function --- config/navigation.yaml | 4 ++++ src/navigation/approach_object.py | 12 ++++++++---- src/navigation/approach_post.py | 12 ++++++++---- src/navigation/approach_target_base.py | 4 +++- src/navigation/context.py | 25 ++++++++++++++++++++++--- src/navigation/long_range.py | 11 +++++++---- 
src/navigation/navigation.py | 4 ++-- src/navigation/search.py | 26 +++++++++++--------------- src/navigation/waypoint.py | 14 +++----------- 9 files changed, 68 insertions(+), 44 deletions(-) diff --git a/config/navigation.yaml b/config/navigation.yaml index c7a9904e0..4483d8c56 100644 --- a/config/navigation.yaml +++ b/config/navigation.yaml @@ -22,6 +22,10 @@ search: segments_per_rotation: 8 distance_between_spirals: 3 +object_search: + coverage_radius: 10 + distance_between_spirals: 3 + single_fiducial: stop_thresh: 1.0 fiducial_stop_threshold: 1.75 diff --git a/src/navigation/approach_object.py b/src/navigation/approach_object.py index 55faf80ed..694840cdf 100644 --- a/src/navigation/approach_object.py +++ b/src/navigation/approach_object.py @@ -3,8 +3,9 @@ from util.ros_utils import get_rosparam from util.state_lib.state import State from typing import Optional +import numpy as np -from navigation import state +from navigation import state, recovery from navigation.approach_target_base import ApproachTargetBaseState @@ -23,12 +24,15 @@ def on_enter(self, context): def on_exit(self, context): pass - def get_target_pos(self, context) -> Optional[int]: + def get_target_pos(self, context) -> Optional[np.ndarray]: object_pos = context.env.current_target_pos() return object_pos def determine_next(self, context, finished: bool) -> State: if finished: return state.DoneState() - else: - return self + if context.rover.stuck: + context.rover.previous_state = self + return recovery.RecoveryState() + + return self diff --git a/src/navigation/approach_post.py b/src/navigation/approach_post.py index 53c2acfe3..5fb010194 100644 --- a/src/navigation/approach_post.py +++ b/src/navigation/approach_post.py @@ -3,8 +3,9 @@ from util.ros_utils import get_rosparam from util.state_lib.state import State from typing import Optional +import numpy as np -from navigation import search, state, waypoint +from navigation import state, recovery from navigation.approach_target_base import 
ApproachTargetBaseState @@ -22,7 +23,7 @@ def on_enter(self, context): def on_exit(self, context): pass - def get_target_pos(self, context) -> Optional[int]: + def get_target_pos(self, context) -> Optional[np.ndarray]: # return fid_pos, either position or None fid_pos = context.env.current_target_pos() return fid_pos @@ -30,5 +31,8 @@ def get_target_pos(self, context) -> Optional[int]: def determine_next(self, context, finished: bool) -> State: if finished: return state.DoneState() - else: - return self + if context.rover.stuck: + context.rover.previous_state = self + return recovery.RecoveryState() + + return self diff --git a/src/navigation/approach_target_base.py b/src/navigation/approach_target_base.py index 135acc1f9..5c763149b 100644 --- a/src/navigation/approach_target_base.py +++ b/src/navigation/approach_target_base.py @@ -4,6 +4,7 @@ from util.state_lib.state import State from abc import abstractmethod from typing import Optional +import numpy as np from navigation import search @@ -20,7 +21,7 @@ def on_exit(self, context) -> None: pass @abstractmethod - def get_target_pos(self, context) -> Optional[int]: + def get_target_pos(self, context) -> Optional[np.ndarray]: raise NotImplementedError @abstractmethod @@ -53,6 +54,7 @@ def on_loop(self, context) -> State: context.course.increment_waypoint() else: context.rover.send_drive_command(cmd_vel) + except ( tf2_ros.LookupException, tf2_ros.ConnectivityException, diff --git a/src/navigation/context.py b/src/navigation/context.py index 69cdf42ee..583f2b211 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -20,14 +20,16 @@ ) from mrover.srv import EnableAuton, EnableAutonRequest, EnableAutonResponse from navigation.drive import DriveController +from navigation import approach_post, long_range, approach_object from std_msgs.msg import Time, Bool from util.SE3 import SE3 from visualization_msgs.msg import Marker from util.ros_utils import get_rosparam +from util.state_lib.state import State 
TAG_EXPIRATION_TIME_SECONDS = 60 -TIME_THRESHOLD = get_rosparam("long_range/time_threshold", 5) +LONG_RANGE_TAG_EXPIRATION_TIME_SECONDS = get_rosparam("long_range/time_threshold", 5) INCREMENT_WEIGHT = get_rosparam("long_range/increment_weight", 5) DECREMENT_WEIGHT = get_rosparam("long_range/decrement_weight", 1) MIN_HITS = get_rosparam("long_range/min_hits", 3) @@ -167,7 +169,7 @@ def get(self, tag_id: int) -> Optional[LongRangeTag]: if ( tag_id in self.__data and self.__data[tag_id].hit_count >= self.min_hits - and time_difference <= TIME_THRESHOLD + and time_difference <= LONG_RANGE_TAG_EXPIRATION_TIME_SECONDS ): return self.__data[tag_id].tag else: @@ -201,7 +203,6 @@ def current_waypoint(self) -> Optional[Waypoint]: """ Returns the currently active waypoint - :param ud: State machine user data :return: Next waypoint to reach if we have an active course """ if self.course_data is None or self.waypoint_index >= len(self.course_data.waypoints): @@ -233,6 +234,24 @@ def look_for_object(self) -> bool: def is_complete(self) -> bool: return self.waypoint_index == len(self.course_data.waypoints) + def check_approach(self) -> Optional[State]: + """ + Returns one of the approach states (ApproachPostState, LongRangeState, or ApproachObjectState) + if we are looking for a post or object and we see it in one of the cameras (ZED or long range) + """ + current_waypoint = self.current_waypoint() + if self.look_for_post(): + # if we see the tag in the ZED, go to ApproachPostState + if self.ctx.env.current_target_pos() is not None: + return approach_post.ApproachPostState() + # if we see the tag in the long range camera, go to LongRangeState + if self.ctx.env.long_range_tags.get(current_waypoint.tag_id) is not None: + return long_range.LongRangeState() + elif self.look_for_object(): + if self.ctx.env.current_target_pos() is not None: + return approach_object.ApproachObjectState() # if we see the object + return None + def setup_course(ctx: Context, waypoints: 
List[Tuple[Waypoint, SE3]]) -> Course: all_waypoint_info = [] diff --git a/src/navigation/long_range.py b/src/navigation/long_range.py index 9c7184ae5..554c47490 100644 --- a/src/navigation/long_range.py +++ b/src/navigation/long_range.py @@ -6,7 +6,7 @@ from util.np_utils import normalized from typing import Optional -from navigation import approach_post +from navigation import approach_post, recovery from navigation.approach_target_base import ApproachTargetBaseState import numpy as np @@ -29,7 +29,7 @@ def on_enter(self, context): def on_exit(self, context): pass - def get_target_pos(self, context) -> Optional[int]: + def get_target_pos(self, context) -> Optional[np.ndarray]: tag_id = context.course.current_waypoint().tag_id tag = context.env.long_range_tags.get(tag_id) if tag is None: @@ -60,5 +60,8 @@ def determine_next(self, context, finished: bool) -> State: fid_pos = context.env.current_target_pos() if fid_pos is None: return self - else: - return approach_post.ApproachPostState() + if context.rover.stuck: + context.rover.previous_state = self + return recovery.RecoveryState() + + return approach_post.ApproachPostState() diff --git a/src/navigation/navigation.py b/src/navigation/navigation.py index 44d4db391..5cb574e71 100755 --- a/src/navigation/navigation.py +++ b/src/navigation/navigation.py @@ -59,8 +59,8 @@ def __init__(self, context: Context): DoneState(), ], ) - self.state_machine.add_transitions(ApproachObjectState(), [DoneState(), SearchState()]) - self.state_machine.add_transitions(LongRangeState(), [ApproachPostState(), SearchState()]) + self.state_machine.add_transitions(ApproachObjectState(), [DoneState(), SearchState(), RecoveryState()]) + self.state_machine.add_transitions(LongRangeState(), [ApproachPostState(), SearchState(), RecoveryState()]) self.state_machine.add_transitions(OffState(), [WaypointState(), DoneState()]) self.state_machine.configure_off_switch(OffState(), off_check) self.state_machine_server = 
StatePublisher(self.state_machine, "nav_structure", 1, "nav_state", 10) diff --git a/src/navigation/search.py b/src/navigation/search.py index 18db30adc..ad6a23ee6 100644 --- a/src/navigation/search.py +++ b/src/navigation/search.py @@ -86,11 +86,14 @@ class SearchState(State): prev_target: Optional[np.ndarray] = None is_recovering: bool = False - STOP_THRESH = get_rosparam("search/stop_thresh", 0.2) + STOP_THRESH = get_rosparam("search/stop_thresh", 0.5) DRIVE_FWD_THRESH = get_rosparam("search/drive_fwd_thresh", 0.34) # 20 degrees SPIRAL_COVERAGE_RADIUS = get_rosparam("search/coverage_radius", 20) SEGMENTS_PER_ROTATION = get_rosparam("search/segments_per_rotation", 8) - DISTANCE_BETWEEN_SPIRALS = get_rosparam("search/distance_between_spirals", 2.5) + DISTANCE_BETWEEN_SPIRALS = get_rosparam("search/distance_between_spirals", 3) + + OBJECT_SPIRAL_COVERAGE_RADIUS = get_rosparam("object_search/coverage_radius", 10) + OBJECT_DISTANCE_BETWEEN_SPIRALS = get_rosparam("object_search/distance_between_spirals", 3) def on_enter(self, context) -> None: search_center = context.course.current_waypoint() @@ -107,8 +110,8 @@ def on_enter(self, context) -> None: else: # water bottle or mallet self.traj = SearchTrajectory.spiral_traj( context.rover.get_pose().position[0:2], - self.SPIRAL_COVERAGE_RADIUS / 2, - self.DISTANCE_BETWEEN_SPIRALS / 2, + self.OBJECT_SPIRAL_COVERAGE_RADIUS, + self.OBJECT_DISTANCE_BETWEEN_SPIRALS, self.SEGMENTS_PER_ROTATION, search_center.tag_id, ) @@ -145,15 +148,8 @@ def on_loop(self, context) -> State: ) context.rover.send_drive_command(cmd_vel) - current_waypoint = context.course.current_waypoint() - if current_waypoint.type.val == WaypointType.POST: - # if we see the tag in the ZED, go to ApproachPostState - if context.env.current_target_pos() is not None and context.course.look_for_post(): - return approach_post.ApproachPostState() - # if we see the tag in the long range camera, go to LongRangeState - if 
context.env.long_range_tags.get(current_waypoint.tag_id) is not None: - return long_range.LongRangeState() - else: - if context.env.current_target_pos() is not None and context.course.look_for_object(): - return approach_object.ApproachObjectState() # if we see the object + # returns either ApproachPostState, LongRangeState, ApproachObjectState, or None + if context.course.check_approach() is not None: + return context.course.check_approach() + return self diff --git a/src/navigation/waypoint.py b/src/navigation/waypoint.py index 723f90cd3..9de2d9217 100644 --- a/src/navigation/waypoint.py +++ b/src/navigation/waypoint.py @@ -34,15 +34,9 @@ def on_loop(self, context) -> State: context.env.arrived_at_target = False return post_backup.PostBackupState() - if context.course.look_for_post(): - if context.env.current_target_pos() is not None: - return approach_post.ApproachPostState() - # if we see the tag in the long range camera, go to LongRangeState - if context.env.long_range_tags.get(current_waypoint.tag_id) is not None: - return long_range.LongRangeState() - elif context.course.look_for_object(): - if context.env.current_target_pos() is not None: - return approach_object.ApproachObjectState() + # returns either ApproachPostState, LongRangeState, ApproachObjectState, or None + if context.course.check_approach() is not None: + return context.course.check_approach() # Attempt to find the waypoint in the TF tree and drive to it try: @@ -60,8 +54,6 @@ def on_loop(self, context) -> State: elif context.course.look_for_post() or context.course.look_for_object(): # We finished a waypoint associated with a post or mallet, but we have not seen it yet. 
return search.SearchState() - # TODO elif looking for water bottle: - # return water_bottle_search.WaterBottleSearchState() if context.rover.stuck: context.rover.previous_state = self From 1912a4aae9641e8b2ac1e2cefc321c3d12cc8973 Mon Sep 17 00:00:00 2001 From: Alison Ryckman Date: Sun, 11 Feb 2024 01:57:12 -0500 Subject: [PATCH 060/126] remove hit count and refactor center calculation --- launch/long_range_cam.launch | 4 +- .../long_range_cam/long_range_cam.cpp | 8 +- .../long_range_tag_detector.cpp | 6 - .../long_range_tag_detector.hpp | 103 +--------- .../long_range_tag_detector.processing.cpp | 178 +++--------------- 5 files changed, 42 insertions(+), 257 deletions(-) diff --git a/launch/long_range_cam.launch b/launch/long_range_cam.launch index be21fde31..30b5f2bd0 100644 --- a/launch/long_range_cam.launch +++ b/launch/long_range_cam.launch @@ -1,4 +1,4 @@ - + From ba87040bd7dd2c7972b844ac876d3954b01cc460 Mon Sep 17 00:00:00 2001 From: neuenfeldttj Date: Sat, 6 Apr 2024 12:17:55 -0400 Subject: [PATCH 077/126] Change stream number --- src/teleoperation/frontend/src/components/AutonTask.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/teleoperation/frontend/src/components/AutonTask.vue b/src/teleoperation/frontend/src/components/AutonTask.vue index 0c36c1a89..35018caa2 100644 --- a/src/teleoperation/frontend/src/components/AutonTask.vue +++ b/src/teleoperation/frontend/src/components/AutonTask.vue @@ -21,7 +21,7 @@
- +
From aa21895ac5a735342cee046d682a66ed63dffe85 Mon Sep 17 00:00:00 2001 From: neuenfeldttj Date: Sat, 6 Apr 2024 12:24:37 -0400 Subject: [PATCH 078/126] stream --- src/teleoperation/frontend/src/components/CameraFeed.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/teleoperation/frontend/src/components/CameraFeed.vue b/src/teleoperation/frontend/src/components/CameraFeed.vue index e5304206d..00afae0b9 100644 --- a/src/teleoperation/frontend/src/components/CameraFeed.vue +++ b/src/teleoperation/frontend/src/components/CameraFeed.vue @@ -65,7 +65,7 @@ export default defineComponent({ }, mounted: function () { - // this.startStream(this.id) + this.startStream(this.id) this.$nextTick(() => { const canvas: HTMLCanvasElement = document.getElementById( 'stream-' + this.id From da25ed1ee4b0cae2dc31a144f906d53992626f37 Mon Sep 17 00:00:00 2001 From: jnnanni Date: Sun, 14 Apr 2024 13:11:44 -0400 Subject: [PATCH 079/126] Fix sending arm positions --- src/teleoperation/backend/consumers.py | 18 +++---- .../frontend/src/components/ArmControls.vue | 54 +++++++++++-------- 2 files changed, 41 insertions(+), 31 deletions(-) diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index a9c956c7b..dd93f6044 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -253,6 +253,14 @@ def to_velocity(self, input: int, joint_name: str, brushless: bool = True) -> fl ) def handle_controls_message(self, msg): + if msg["type"] == "arm_values" and msg["arm_mode"] == "position": + position_names = ["joint_b", "joint_c", "joint_de_pitch", "joint_de_roll"] + position_cmd = Position( + names=position_names, + positions=msg["positions"], + ) + self.arm_position_cmd_pub.publish(position_cmd) + return CACHE = ["cache"] SA_NAMES = ["sa_x", "sa_y", "sa_z", "sampler", "sensor_actuator"] RA_NAMES = self.RA_NAMES @@ -302,16 +310,6 @@ def handle_controls_message(self, msg): ) 
publishers[3].publish(arm_ik_cmd) - if msg["arm_mode"] == "position": - position_names = controls_names - if msg["type"] == "arm_values": - position_names = ["joint_b", "joint_c", "joint_de_pitch", "joint_de_roll"] - position_cmd = Position( - names=position_names, - positions=msg["positions"], - ) - publishers[0].publish(position_cmd) - elif msg["arm_mode"] == "velocity": velocity_cmd = Velocity() velocity_cmd.names = controls_names diff --git a/src/teleoperation/frontend/src/components/ArmControls.vue b/src/teleoperation/frontend/src/components/ArmControls.vue index f086e00fd..5279a8953 100644 --- a/src/teleoperation/frontend/src/components/ArmControls.vue +++ b/src/teleoperation/frontend/src/components/ArmControls.vue @@ -96,7 +96,6 @@ import ToggleButton from './ToggleButton.vue' import CalibrationCheckbox from './CalibrationCheckbox.vue' import MotorAdjust from './MotorAdjust.vue' import LimitSwitch from './LimitSwitch.vue' - // In seconds const updateRate = 0.01 let interval: number | undefined @@ -137,6 +136,7 @@ export default defineComponent({ } }, positions: [], + send_positions: false // Only send after submit is clicked for the first time } }, @@ -152,11 +152,17 @@ export default defineComponent({ alert('Toggling Arm Laser failed.') } } + }, + arm_mode(newMode) { + if (newMode !== 'position') { + this.positions = [] + this.send_positions = false + } } }, - mounted: function() { - document.addEventListener('keydown', this.keyDown); + mounted: function () { + document.addEventListener('keydown', this.keyDown) }, beforeUnmount: function () { @@ -166,22 +172,26 @@ export default defineComponent({ created: function () { interval = window.setInterval(() => { - const gamepads = navigator.getGamepads() - for (let i = 0; i < 4; i++) { - const gamepad = gamepads[i] - if (gamepad) { - // Microsoft and Xbox for old Xbox 360 controllers - // X-Box for new PowerA Xbox One controllers - if ( - gamepad.id.includes('Microsoft') || - gamepad.id.includes('Xbox') || - 
gamepad.id.includes('X-Box') - ) { - let buttons = gamepad.buttons.map((button) => { - return button.value - }) + if (this.send_positions) { + this.publishJoystickMessage([], [], this.arm_mode, this.positions) + } else { + const gamepads = navigator.getGamepads() + for (let i = 0; i < 4; i++) { + const gamepad = gamepads[i] + if (gamepad) { + // Microsoft and Xbox for old Xbox 360 controllers + // X-Box for new PowerA Xbox One controllers + if ( + gamepad.id.includes('Microsoft') || + gamepad.id.includes('Xbox') || + gamepad.id.includes('X-Box') + ) { + let buttons = gamepad.buttons.map((button) => { + return button.value + }) - this.publishJoystickMessage(gamepad.axes, buttons, this.arm_mode, this.positions) + this.publishJoystickMessage(gamepad.axes, buttons, this.arm_mode, this.positions) + } } } } @@ -221,13 +231,15 @@ export default defineComponent({ this.positions = Object.values(this.temp_positions).map( (obj) => (Number(obj.value) * Math.PI) / 180 ) + this.send_positions = true + this.publishJoystickMessage([], [], this.arm_mode, this.positions) }, - keyDown: function(event: { key: string }) { + keyDown: function (event: { key: string }) { if (event.key == ' ') { - this.arm_mode = 'arm_disabled'; + this.arm_mode = 'arm_disabled' } - }, + } } }) From 8bafb50593a3ec25e38b1461feb155ee2f55ab45 Mon Sep 17 00:00:00 2001 From: neuenfeldttj Date: Sun, 14 Apr 2024 13:22:59 -0400 Subject: [PATCH 080/126] Fixed bug when gamepad is plugged in --- src/teleoperation/frontend/src/components/ArmControls.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/teleoperation/frontend/src/components/ArmControls.vue b/src/teleoperation/frontend/src/components/ArmControls.vue index 5279a8953..c8d7716e7 100644 --- a/src/teleoperation/frontend/src/components/ArmControls.vue +++ b/src/teleoperation/frontend/src/components/ArmControls.vue @@ -174,7 +174,7 @@ export default defineComponent({ interval = window.setInterval(() => { if (this.send_positions) { 
this.publishJoystickMessage([], [], this.arm_mode, this.positions) - } else { + } else if (this.arm_mode !== "position") { const gamepads = navigator.getGamepads() for (let i = 0; i < 4; i++) { const gamepad = gamepads[i] From e2226605fef802a51279f4e4cfd696e2144b8862 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sun, 14 Apr 2024 15:12:05 -0400 Subject: [PATCH 081/126] Start working on camera stuff --- CMakeLists.txt | 8 ++- cmake/deps.cmake | 2 + .../gst_websocket_streamer.cpp | 64 +++++++++++++++++-- .../gst_websocket_streamer.hpp | 3 +- src/esw/gst_websocket_streamer/pch.hpp | 12 ++-- 5 files changed, 75 insertions(+), 14 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c23c58118..9552eada5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -198,9 +198,11 @@ if (CUDA_FOUND) # target_include_directories(nv_vid_codec_h265_enc SYSTEM PUBLIC deps/nvenc) endif () -mrover_add_nodelet(gst_websocket_streamer src/esw/gst_websocket_streamer/*.c* src/esw/gst_websocket_streamer src/esw/gst_websocket_streamer/pch.hpp) -mrover_nodelet_link_libraries(gst_websocket_streamer PRIVATE websocket_server ${Gst_LIBRARIES} ${GstApp_LIBRARIES}) -mrover_nodelet_include_directories(gst_websocket_streamer ${Gst_INCLUDE_DIRS} ${GstApp_INCLUDE_DIRS}) +if (Gst_FOUND AND GstApp_FOUND AND LibUsb_FOUND AND LibUdev_FOUND) + mrover_add_nodelet(gst_websocket_streamer src/esw/gst_websocket_streamer/*.c* src/esw/gst_websocket_streamer src/esw/gst_websocket_streamer/pch.hpp) + mrover_nodelet_link_libraries(gst_websocket_streamer PRIVATE websocket_server ${Gst_LIBRARIES} ${GstApp_LIBRARIES} ${LibUsb_LIBRARIES} ${LibUdev_LIBRARIES}) + mrover_nodelet_include_directories(gst_websocket_streamer ${Gst_INCLUDE_DIRS} ${GstApp_INCLUDE_DIRS} ${LibUsb_INCLUDE_DIRS} ${LibUdev_INCLUDE_DIRS}) +endif () if (ZED_FOUND) mrover_add_nodelet(object_detector src/perception/object_detector/*.c* src/perception/object_detector src/perception/object_detector/pch.hpp) diff --git a/cmake/deps.cmake 
b/cmake/deps.cmake index b73637ce8..b3bb4a520 100644 --- a/cmake/deps.cmake +++ b/cmake/deps.cmake @@ -69,3 +69,5 @@ pkg_search_module(NetLink libnl-3.0 QUIET) pkg_search_module(NetLinkRoute libnl-route-3.0 QUIET) pkg_search_module(Gst gstreamer-1.0 QUIET) pkg_search_module(GstApp gstreamer-app-1.0 QUIET) +pkg_search_module(LibUsb libusb-1.0 QUIET) +pkg_search_module(LibUdev libudev QUIET) diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp b/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp index a1430549b..8510c39e6 100644 --- a/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp +++ b/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp @@ -39,7 +39,7 @@ namespace mrover { mMainLoop = gstCheck(g_main_loop_new(nullptr, FALSE)); std::string launch; - if (mCaptureDevice.empty()) { + if (mDevice.empty()) { if constexpr (IS_JETSON) { // ReSharper disable once CppDFAUnreachableCode launch = std::format( @@ -85,7 +85,7 @@ namespace mrover { "! video/x-raw(memory:NVMM),format=NV12 " "! nvv4l2h265enc name=encoder bitrate={} iframeinterval=300 vbv-size=33333 insert-sps-pps=true control-rate=constant_bitrate profile=Main num-B-Frames=0 ratecontrol-enable=true preset-level=UltraFastPreset EnableTwopassCBR=false maxperf-enable=true " "! appsink name=streamSink sync=false", - mCaptureDevice, mImageWidth, mImageHeight, mImageFramerate, mBitrate); + mDevice, mImageWidth, mImageHeight, mImageFramerate, mBitrate); } else { // ReSharper disable once CppDFAUnreachableCode launch = std::format( @@ -94,14 +94,14 @@ namespace mrover { "! videoconvert " "! nvh265enc name=encoder " "! 
appsink name=streamSink sync=false", - mCaptureDevice, mImageWidth, mImageHeight); + mDevice, mImageWidth, mImageHeight); } } ROS_INFO_STREAM(std::format("GStreamer launch: {}", launch)); mPipeline = gstCheck(gst_parse_launch(launch.c_str(), nullptr)); - if (mCaptureDevice.empty()) mImageSource = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "imageSource")); + if (mDevice.empty()) mImageSource = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "imageSource")); mStreamSink = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "streamSink")); if (gst_element_set_state(mPipeline, GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) @@ -150,7 +150,8 @@ namespace mrover { mNh = getMTNodeHandle(); mPnh = getMTPrivateNodeHandle(); - mCaptureDevice = mPnh.param("device", ""); + mDevice = mPnh.param("device", ""); + mUsbIdentifier = mPnh.param("usb_id", "1"); mImageTopic = mPnh.param("image_topic", "image"); mImageWidth = mPnh.param("width", 640); mImageHeight = mPnh.param("height", 480); @@ -161,6 +162,57 @@ namespace mrover { gst_init(nullptr, nullptr); + libusb_context* context{}; + if (libusb_init(&context) != 0) throw std::runtime_error{"Failed to initialize libusb"}; + + libusb_device** list{}; + ssize_t count = libusb_get_device_list(context, &list); + if (count < 0) throw std::runtime_error{"Failed to get device list"}; + + for (ssize_t i = 0; i < count; ++i) { + libusb_device* device = list[i]; + libusb_device_descriptor descriptor{}; + if (libusb_get_device_descriptor(device, &descriptor) != 0) continue; + + std::uint8_t bus = libusb_get_bus_number(device); + std::uint8_t address = libusb_get_device_address(device); + std::uint8_t port = libusb_get_port_number(device); + + // get associated video port + // libusb_get_port_numbers(device, port_numbers, sizeof(port_numbers)); + + ROS_INFO_STREAM(std::format("USB device: bus={}, address={}, port={}, vendor={}, product={}", + bus, address, port, descriptor.idVendor, descriptor.idProduct)); + } + + if 
(!mUsbIdentifier.empty()) { + udev* udevContext = udev_new(); + if (!udevContext) throw std::runtime_error{"Failed to initialize udev"}; + + udev_enumerate* enumerate = udev_enumerate_new(udevContext); + if (!enumerate) throw std::runtime_error{"Failed to create udev enumeration"}; + + udev_enumerate_add_match_subsystem(enumerate, "video4linux"); + udev_enumerate_scan_devices(enumerate); + + udev_list_entry* devices = udev_enumerate_get_list_entry(enumerate); + if (!devices) throw std::runtime_error{"Failed to get udev device list"}; + + udev_list_entry* entry; + udev_list_entry_foreach(entry, devices) { + std::string deviceSysPath = udev_list_entry_get_name(entry); + udev_device* device = udev_device_new_from_syspath(udevContext, deviceSysPath.c_str()); + if (!device) continue; + + std::string devicePath = udev_device_get_devnode(device); + if (devicePath.empty()) continue; + + mDevice = devicePath; + ROS_INFO_STREAM(std::format("Found USB device: {}", mDevice)); + break; + } + } + initPipeline(); mStreamServer.emplace( @@ -193,7 +245,7 @@ namespace mrover { ROS_INFO("Stopped GStreamer pipeline"); }); - if (mCaptureDevice.empty()) { + if (mDevice.empty()) { mImageSubscriber = mNh.subscribe(mImageTopic, 1, &GstWebsocketStreamerNodelet::imageCallback, this); } diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp b/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp index c4950f62c..55180d333 100644 --- a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp +++ b/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp @@ -8,8 +8,9 @@ namespace mrover { ros::NodeHandle mNh, mPnh; - std::string mCaptureDevice; + std::string mDevice; std::string mImageTopic; + std::string mUsbIdentifier; std::uint64_t mBitrate{}; std::uint32_t mImageWidth{}, mImageHeight{}, mImageFramerate{}; diff --git a/src/esw/gst_websocket_streamer/pch.hpp b/src/esw/gst_websocket_streamer/pch.hpp index 2c224a3ec..cbac989d5 100644 --- 
a/src/esw/gst_websocket_streamer/pch.hpp +++ b/src/esw/gst_websocket_streamer/pch.hpp @@ -1,20 +1,24 @@ #pragma once -#include #include #include +#include #include -#include -#include #include -#include #include #include #include +#include +#include +#include #include #include #include +#include + +#include + #include From e102663ed82658d43e8a365eb0332cea6e1c98c8 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Mon, 15 Apr 2024 16:15:57 -0400 Subject: [PATCH 082/126] Add device resizing, rename some things --- CMakeLists.txt | 2 +- ansible/roles/esw/tasks/main.yml | 2 +- .../gst_websocket_streamer.cpp | 97 +++++++++++-------- .../gst_websocket_streamer.hpp | 13 ++- src/esw/gst_websocket_streamer/pch.hpp | 2 + 5 files changed, 72 insertions(+), 44 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9552eada5..832fb2d6b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -200,7 +200,7 @@ endif () if (Gst_FOUND AND GstApp_FOUND AND LibUsb_FOUND AND LibUdev_FOUND) mrover_add_nodelet(gst_websocket_streamer src/esw/gst_websocket_streamer/*.c* src/esw/gst_websocket_streamer src/esw/gst_websocket_streamer/pch.hpp) - mrover_nodelet_link_libraries(gst_websocket_streamer PRIVATE websocket_server ${Gst_LIBRARIES} ${GstApp_LIBRARIES} ${LibUsb_LIBRARIES} ${LibUdev_LIBRARIES}) + mrover_nodelet_link_libraries(gst_websocket_streamer PRIVATE websocket_server ${Gst_LIBRARIES} ${GstApp_LIBRARIES} ${LibUsb_LIBRARIES} ${LibUdev_LIBRARIES} opencv_core opencv_imgproc) mrover_nodelet_include_directories(gst_websocket_streamer ${Gst_INCLUDE_DIRS} ${GstApp_INCLUDE_DIRS} ${LibUsb_INCLUDE_DIRS} ${LibUdev_INCLUDE_DIRS}) endif () diff --git a/ansible/roles/esw/tasks/main.yml b/ansible/roles/esw/tasks/main.yml index f9a81d7a5..9ae41142b 100644 --- a/ansible/roles/esw/tasks/main.yml +++ b/ansible/roles/esw/tasks/main.yml @@ -22,4 +22,4 @@ - name: Install Moteus GUI pip: - name: moteus_gui + name: moteus-gui diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp 
b/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp index 8510c39e6..9787ae241 100644 --- a/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp +++ b/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp @@ -8,6 +8,8 @@ constexpr bool IS_JETSON = false; namespace mrover { + using namespace std::string_view_literals; + template auto gstCheck(T* t) -> T* { if (!t) throw std::runtime_error{"Failed to create"}; @@ -39,7 +41,7 @@ namespace mrover { mMainLoop = gstCheck(g_main_loop_new(nullptr, FALSE)); std::string launch; - if (mDevice.empty()) { + if (mDeviceNode.empty()) { if constexpr (IS_JETSON) { // ReSharper disable once CppDFAUnreachableCode launch = std::format( @@ -85,7 +87,7 @@ namespace mrover { "! video/x-raw(memory:NVMM),format=NV12 " "! nvv4l2h265enc name=encoder bitrate={} iframeinterval=300 vbv-size=33333 insert-sps-pps=true control-rate=constant_bitrate profile=Main num-B-Frames=0 ratecontrol-enable=true preset-level=UltraFastPreset EnableTwopassCBR=false maxperf-enable=true " "! appsink name=streamSink sync=false", - mDevice, mImageWidth, mImageHeight, mImageFramerate, mBitrate); + mDeviceNode, mImageWidth, mImageHeight, mImageFramerate, mBitrate); } else { // ReSharper disable once CppDFAUnreachableCode launch = std::format( @@ -94,14 +96,14 @@ namespace mrover { "! videoconvert " "! nvh265enc name=encoder " "! 
appsink name=streamSink sync=false", - mDevice, mImageWidth, mImageHeight); + mDeviceNode, mImageWidth, mImageHeight); } } ROS_INFO_STREAM(std::format("GStreamer launch: {}", launch)); mPipeline = gstCheck(gst_parse_launch(launch.c_str(), nullptr)); - if (mDevice.empty()) mImageSource = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "imageSource")); + if (mDeviceNode.empty()) mImageSource = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "imageSource")); mStreamSink = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "streamSink")); if (gst_element_set_state(mPipeline, GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) @@ -127,14 +129,22 @@ namespace mrover { if (mStreamServer->clientCount() == 0) return; if (msg->encoding != sensor_msgs::image_encodings::BGRA8) throw std::runtime_error{"Unsupported encoding"}; - if (msg->width != mImageWidth || msg->height != mImageHeight) throw std::runtime_error{"Unsupported resolution"}; + + cv::Size receivedSize{static_cast(msg->width), static_cast(msg->height)}; + cv::Mat bgraFrame{receivedSize, CV_8UC4, const_cast(msg->data.data()), msg->step}; + + if (cv::Size targetSize{static_cast(mImageWidth), static_cast(mImageHeight)}; + receivedSize != targetSize) { + ROS_WARN_ONCE("Image size does not match pipeline app source size, will resize"); + resize(bgraFrame, bgraFrame, targetSize); + } // "step" is the number of bytes (NOT pixels) in an image row - std::size_t size = msg->step * msg->height; + std::size_t size = bgraFrame.step * bgraFrame.rows; GstBuffer* buffer = gstCheck(gst_buffer_new_allocate(nullptr, size, nullptr)); GstMapInfo info; gst_buffer_map(buffer, &info, GST_MAP_WRITE); - std::memcpy(info.data, msg->data.data(), size); + std::memcpy(info.data, bgraFrame.data, size); gst_buffer_unmap(buffer, &info); gst_app_src_push_buffer(GST_APP_SRC(mImageSource), buffer); @@ -150,8 +160,8 @@ namespace mrover { mNh = getMTNodeHandle(); mPnh = getMTPrivateNodeHandle(); - mDevice = mPnh.param("device", ""); - mUsbIdentifier = 
mPnh.param("usb_id", "1"); + mDeviceNode = mPnh.param("dev_node", ""); + mDevicePath = mPnh.param("dev_path", "1"); mImageTopic = mPnh.param("image_topic", "image"); mImageWidth = mPnh.param("width", 640); mImageHeight = mPnh.param("height", 480); @@ -162,30 +172,24 @@ namespace mrover { gst_init(nullptr, nullptr); - libusb_context* context{}; - if (libusb_init(&context) != 0) throw std::runtime_error{"Failed to initialize libusb"}; - - libusb_device** list{}; - ssize_t count = libusb_get_device_list(context, &list); - if (count < 0) throw std::runtime_error{"Failed to get device list"}; - - for (ssize_t i = 0; i < count; ++i) { - libusb_device* device = list[i]; - libusb_device_descriptor descriptor{}; - if (libusb_get_device_descriptor(device, &descriptor) != 0) continue; - - std::uint8_t bus = libusb_get_bus_number(device); - std::uint8_t address = libusb_get_device_address(device); - std::uint8_t port = libusb_get_port_number(device); + if (!mDevicePath.empty()) { + // libusb_context* context{}; + // if (libusb_init(&context) != 0) throw std::runtime_error{"Failed to initialize libusb"}; + // + // libusb_device** list{}; + // ssize_t count = libusb_get_device_list(context, &list); + // if (count < 0) throw std::runtime_error{"Failed to get device list"}; + // + // // Find the libusb device associated with our camera + // + // auto it = std::ranges::find_if(list, list + count, [this](libusb_device* device) { + // libusb_device_descriptor descriptor{}; + // if (libusb_get_device_descriptor(device, &descriptor) != 0) return false; + // return std::format("{}-{}", libusb_get_bus_number(device), libusb_get_port_number(device)) == mUsbIdentifier; + // }); + // if (it == list + count) throw std::runtime_error{"Failed to find USB device"}; + // libusb_device* targetUsbDevice = *it; - // get associated video port - // libusb_get_port_numbers(device, port_numbers, sizeof(port_numbers)); - - ROS_INFO_STREAM(std::format("USB device: bus={}, address={}, port={}, vendor={}, 
product={}", - bus, address, port, descriptor.idVendor, descriptor.idProduct)); - } - - if (!mUsbIdentifier.empty()) { udev* udevContext = udev_new(); if (!udevContext) throw std::runtime_error{"Failed to initialize udev"}; @@ -198,19 +202,32 @@ namespace mrover { udev_list_entry* devices = udev_enumerate_get_list_entry(enumerate); if (!devices) throw std::runtime_error{"Failed to get udev device list"}; + udev_device* device{}; udev_list_entry* entry; udev_list_entry_foreach(entry, devices) { - std::string deviceSysPath = udev_list_entry_get_name(entry); - udev_device* device = udev_device_new_from_syspath(udevContext, deviceSysPath.c_str()); - if (!device) continue; + device = udev_device_new_from_syspath(udevContext, udev_list_entry_get_name(entry)); - std::string devicePath = udev_device_get_devnode(device); - if (devicePath.empty()) continue; + if (udev_device_get_devpath(device) != mDevicePath) continue; + + if (!device) throw std::runtime_error{"Failed to get udev device"}; - mDevice = devicePath; - ROS_INFO_STREAM(std::format("Found USB device: {}", mDevice)); break; + + // for (udev_list_entry* listEntry = udev_device_get_properties_list_entry(device); listEntry != nullptr; listEntry = udev_list_entry_get_next(listEntry)) { + // char const* name = udev_list_entry_get_name(listEntry); + // char const* value = udev_list_entry_get_value(listEntry); + // ROS_INFO_STREAM(std::format("udev: {} = {}", name, value)); + // } + + // if (std::stoi(udev_device_get_sysattr_value(device, "busnum")) == libusb_get_bus_number(targetUsbDevice) && + // std::stoi(udev_device_get_sysattr_value(device, "devnum")) == libusb_get_device_address(targetUsbDevice)) { + // targetUdevDevice = device; + // break; + // } } + if (!device) throw std::runtime_error{"Failed to find udev device"}; + + mDeviceNode = udev_device_get_devnode(device); } initPipeline(); @@ -245,7 +262,7 @@ namespace mrover { ROS_INFO("Stopped GStreamer pipeline"); }); - if (mDevice.empty()) { + if 
(mDeviceNode.empty()) { mImageSubscriber = mNh.subscribe(mImageTopic, 1, &GstWebsocketStreamerNodelet::imageCallback, this); } diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp b/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp index 55180d333..f72f01253 100644 --- a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp +++ b/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp @@ -8,9 +8,18 @@ namespace mrover { ros::NodeHandle mNh, mPnh; - std::string mDevice; + // For example, /dev/video0 + // These device paths are not garunteed to stay the same between reboots + // Prefer sys path for non-debugging purposes + std::string mDeviceNode; std::string mImageTopic; - std::string mUsbIdentifier; + // To find the sys path: + // 1) Disconnect all cameras + // 2) Confirm there are no /dev/video* devices + // 2) Connect the camera you want to use + // 3) Run "ls /dev/video*" to verify the device is connected + // 4) Run "udevadm info -q path -n /dev/video0" to get the sys path + std::string mDevicePath; std::uint64_t mBitrate{}; std::uint32_t mImageWidth{}, mImageHeight{}, mImageFramerate{}; diff --git a/src/esw/gst_websocket_streamer/pch.hpp b/src/esw/gst_websocket_streamer/pch.hpp index cbac989d5..2769438ad 100644 --- a/src/esw/gst_websocket_streamer/pch.hpp +++ b/src/esw/gst_websocket_streamer/pch.hpp @@ -21,4 +21,6 @@ #include +#include + #include From b75e2583064b580a7d733547c0089b78ecb6f261 Mon Sep 17 00:00:00 2001 From: neuenfeldttj Date: Tue, 16 Apr 2024 11:35:57 -0400 Subject: [PATCH 083/126] fixed ik msg --- src/teleoperation/backend/consumers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index f985d6d21..9f3b78bd2 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -275,7 +275,7 @@ def handle_controls_message(self, msg): if msg["arm_mode"] == "ik": ee_in_map = 
SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_e_link") - + ee_in_map.position[0] += ( self.ik_names["x"] * self.filter_xbox_axis(msg["axes"][self.xbox_mappings["left_js_x"]]), ) @@ -283,7 +283,7 @@ def handle_controls_message(self, msg): self.ik_names["y"] * self.filter_xbox_axis(msg["axes"][self.xbox_mappings["left_js_y"]]), ) ee_in_map.position[2] += self.ik_names["z"] * (left_trigger - right_trigger) - + arm_ik_cmd = IK(target=PoseStamped( header=Header(stamp=rospy.Time.now(), frame_id="base_link"), pose=Pose( From 5891cb14f23a5b84eab5e6080a506c9da8b4f02a Mon Sep 17 00:00:00 2001 From: neuenfeldttj Date: Tue, 16 Apr 2024 22:20:36 -0400 Subject: [PATCH 084/126] reorganized arm function --- src/teleoperation/backend/consumers.py | 291 ++++++++++++------------- 1 file changed, 140 insertions(+), 151 deletions(-) diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index 9f3b78bd2..653ab5bda 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -250,167 +250,156 @@ def to_velocity(self, input: int, joint_name: str, brushless: bool = True) -> fl * (self.brushed_motors[joint_name]["max_velocity"] - self.brushed_motors[joint_name]["min_velocity"]) / 2 ) + + def publish_ik(self, axes): + left_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["left_trigger"]]) + right_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["right_trigger"]]) + ee_in_map = SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_e_link") + + ee_in_map.position[0] += ( + self.ik_names["x"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_x"]]), + ) + ee_in_map.position[1] += ( + self.ik_names["y"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_y"]]), + ) + + ee_in_map.position[2] += self.ik_names["z"] * (left_trigger - right_trigger) + + arm_ik_cmd = IK(target=PoseStamped( + header=Header(stamp=rospy.Time.now(), frame_id="base_link"), + pose=Pose( + 
position=Point(*ee_in_map.position), + orientation=Quaternion(*ee_in_map.rotation.quaternion), + ) + ) + ) + self.send(text_data=json.dumps({ + "type": "ik", + "target": { + "position": ee_in_map.position.tolist(), + "quaternion": ee_in_map.rotation.quaternion.tolist() + } + })) + self.arm_ik_pub.publish(arm_ik_cmd) + def publish_position(self, type, names, positions): + position_cmd = Position( + names=names, + positions=positions, + ) + if type == "arm_values": + self.arm_position_cmd_pub.publish(position_cmd) + elif type == "sa_arm_values": + self.sa_position_cmd_pub.publish(position_cmd) + elif type == "cache_values": + self.cache_position_cmd_pub.publish(position_cmd) + + def publish_velocity(self, type, names, axes, buttons): + left_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["left_trigger"]]) + right_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["right_trigger"]]) + velocity_cmd = Velocity() + velocity_cmd.names = names + if type == "cache_values": + velocity_cmd.velocities = [ + self.sa_config["cache"]["multiplier"] + * self.filter_xbox_button(buttons, "right_bumper", "left_bumper") + ] + self.cache_velocity_cmd_pub.publish(velocity_cmd) + elif type == "sa_arm_values": + velocity_cmd.velocities = [ + self.to_velocity( + self.filter_xbox_axis(axes[self.sa_config["sa_x"]["xbox_index"]]), "sa_x", False + ), + self.to_velocity( + self.filter_xbox_axis(axes[self.sa_config["sa_y"]["xbox_index"]]), "sa_y", False + ), + self.to_velocity( + self.filter_xbox_axis(axes[self.sa_config["sa_z"]["xbox_index"]]), "sa_z", True + ), + self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), + self.sa_config["sensor_actuator"]["multiplier"] + * self.filter_xbox_button(buttons, "right_bumper", "left_bumper"), + ] + self.sa_velocity_cmd_pub.publish(velocity_cmd) + elif type == "arm_values": + velocity_cmd.velocities = [ + self.to_velocity( + self.filter_xbox_axis(axes[self.ra_config["joint_a"]["xbox_index"]]), "joint_a" + ), + 
self.to_velocity( + self.filter_xbox_axis(axes[self.ra_config["joint_b"]["xbox_index"]]), "joint_b", False + ), + self.to_velocity( + self.filter_xbox_axis(axes[self.ra_config["joint_c"]["xbox_index"]]), "joint_c" + ), + self.to_velocity( + self.filter_xbox_axis(axes[self.ra_config["joint_de_pitch"]["xbox_index"]]), "joint_de_0" + ), + self.to_velocity( + self.filter_xbox_axis(axes[self.ra_config["joint_de_roll"]["xbox_index"]]), "joint_de_1" + ), + self.ra_config["allen_key"]["multiplier"] * self.filter_xbox_button(buttons, "y", "a"), + self.ra_config["gripper"]["multiplier"] * self.filter_xbox_button(buttons, "b", "x"), + ] + self.arm_velocity_cmd_pub.publish(velocity_cmd) + + def publish_throttle(self, type, names, axes, buttons): + left_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["left_trigger"]]) + right_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["right_trigger"]]) + throttle_cmd = Throttle() + throttle_cmd.names = names + if type == "cache_values": + throttle_cmd.throttles = [ + self.sa_config["cache"]["multiplier"] + * self.filter_xbox_button(buttons, "right_bumper", "left_bumper") + ] + self.cache_throttle_cmd_pub.publish(throttle_cmd) + elif type == "arm_values": + throttle_cmd.throttles = [ + self.filter_xbox_axis(axes[self.ra_config["joint_a"]["xbox_index"]]), + self.filter_xbox_axis(axes[self.ra_config["joint_b"]["xbox_index"]]), + self.filter_xbox_axis(axes[self.ra_config["joint_c"]["xbox_index"]]), + self.filter_xbox_axis( + axes[self.ra_config["joint_de_pitch"]["xbox_index_right"]] + - axes[self.ra_config["joint_de_pitch"]["xbox_index_left"]] + ), + self.filter_xbox_axis( + buttons[self.ra_config["joint_de_roll"]["xbox_index_right"]] + - buttons[self.ra_config["joint_de_roll"]["xbox_index_left"]] + ), + self.ra_config["allen_key"]["multiplier"] * self.filter_xbox_button(buttons, "y", "a"), + self.ra_config["gripper"]["multiplier"] * self.filter_xbox_button(buttons, "b", "x"), + ] + 
self.arm_throttle_cmd_pub.publish(throttle_cmd) + elif type == "sa_arm_values": + throttle_cmd.throttles = [ + self.filter_xbox_axis(axes[self.sa_config["sa_x"]["xbox_index"]]), + self.filter_xbox_axis(axes[self.sa_config["sa_y"]["xbox_index"]]), + self.filter_xbox_axis(axes[self.sa_config["sa_z"]["xbox_index"]]), + self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), + self.sa_config["sensor_actuator"]["multiplier"] + * self.filter_xbox_button(buttons, "right_bumper", "left_bumper"), + ] + self.sa_throttle_cmd_pub.publish(throttle_cmd) + def handle_controls_message(self, msg): - CACHE = ["cache"] - SA_NAMES = ["sa_x", "sa_y", "sa_z", "sampler", "sensor_actuator"] - RA_NAMES = self.RA_NAMES - ra_slow_mode = False - left_trigger = self.filter_xbox_axis(msg["axes"][self.xbox_mappings["left_trigger"]]) - right_trigger = self.filter_xbox_axis(msg["axes"][self.xbox_mappings["right_trigger"]]) - arm_pubs = [self.arm_position_cmd_pub, self.arm_velocity_cmd_pub, self.arm_throttle_cmd_pub, self.arm_ik_pub] - sa_pubs = [self.sa_position_cmd_pub, self.sa_velocity_cmd_pub, self.sa_throttle_cmd_pub] - cache_pubs = [self.cache_position_cmd_pub, self.cache_velocity_cmd_pub, self.cache_throttle_cmd_pub] - publishers = [] - controls_names = [] - if msg["type"] == "cache_values": - controls_names = CACHE - publishers = cache_pubs - elif msg["type"] == "arm_values": - controls_names = RA_NAMES - publishers = arm_pubs - elif msg["type"] == "sa_arm_values": - controls_names = SA_NAMES - publishers = sa_pubs + names = ["joint_b", "joint_c", "joint_de_pitch", "joint_de_roll"] + if msg["type"] == "sa_arm_values": + names = ["sa_x", "sa_y", "sa_z", "sampler", "sensor_actuator"] + elif msg["type"] == "cache_values": + names = ["cache"] if msg["arm_mode"] == "ik": - ee_in_map = SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_e_link") - - ee_in_map.position[0] += ( - self.ik_names["x"] * self.filter_xbox_axis(msg["axes"][self.xbox_mappings["left_js_x"]]), - ) - 
ee_in_map.position[1] += ( - self.ik_names["y"] * self.filter_xbox_axis(msg["axes"][self.xbox_mappings["left_js_y"]]), - ) - ee_in_map.position[2] += self.ik_names["z"] * (left_trigger - right_trigger) - - arm_ik_cmd = IK(target=PoseStamped( - header=Header(stamp=rospy.Time.now(), frame_id="base_link"), - pose=Pose( - position=Point(*ee_in_map.position), - orientation=Quaternion(*ee_in_map.rotation.quaternion), - ) - ) - ) - self.send(text_data=json.dumps({ - "type": "ik", - "target": { - "position": ee_in_map.position.tolist(), - "quaternion": ee_in_map.rotation.quaternion.tolist() - } - })) - publishers[3].publish(arm_ik_cmd) + self.publish_ik(axes=msg["axes"]) elif msg["arm_mode"] == "position": - position_names = controls_names - if msg["type"] == "arm_values": - position_names = ["joint_b", "joint_c", "joint_de_pitch", "joint_de_roll"] - position_cmd = Position( - names=position_names, - positions=msg["positions"], - ) - publishers[0].publish(position_cmd) + self.publish_position(type=msg["type"], names=names, positions=msg["positions"]) elif msg["arm_mode"] == "velocity": - velocity_cmd = Velocity() - velocity_cmd.names = controls_names - if msg["type"] == "cache_values": - velocity_cmd.velocities = [ - self.sa_config["cache"]["multiplier"] - * self.filter_xbox_button(msg["buttons"], "right_bumper", "left_bumper") - ] - elif msg["type"] == "sa_arm_values": - velocity_cmd.velocities = [ - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_x"]["xbox_index"]]), "sa_x", False - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_y"]["xbox_index"]]), "sa_y", False - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_z"]["xbox_index"]]), "sa_z", True - ), - self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), - self.sa_config["sensor_actuator"]["multiplier"] - * self.filter_xbox_button(msg["buttons"], "right_bumper", "left_bumper"), - ] - elif msg["type"] == 
"arm_values": - velocity_cmd.velocities = [ - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_a"]["xbox_index"]]), "joint_a" - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_b"]["xbox_index"]]), "joint_b", False - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_c"]["xbox_index"]]), "joint_c" - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_de_pitch"]["xbox_index"]]), "joint_de_0" - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_de_roll"]["xbox_index"]]), "joint_de_1" - ), - self.ra_config["allen_key"]["multiplier"] * self.filter_xbox_button(msg["buttons"], "y", "a"), - self.ra_config["gripper"]["multiplier"] * self.filter_xbox_button(msg["buttons"], "b", "x"), - ] - publishers[1].publish(velocity_cmd) + self.publish_velocity(type=msg["type"], names=names, axes=msg["axes"], buttons=msg["buttons"]) elif msg["arm_mode"] == "throttle": - throttle_cmd = Throttle() - throttle_cmd.names = controls_names - if msg["type"] == "cache_values": - throttle_cmd.throttles = [ - self.sa_config["cache"]["multiplier"] - * self.filter_xbox_button(msg["buttons"], "right_bumper", "left_bumper") - ] - elif msg["type"] == "arm_values": - # print(msg["buttons"]) - - # d_pad_x = msg["axes"][self.xbox_mappings["d_pad_x"]] - # if d_pad_x > 0.5: - # ra_slow_mode = True - # elif d_pad_x < -0.5: - # ra_slow_mode = False - - throttle_cmd.throttles = [ - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_a"]["xbox_index"]]), - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_b"]["xbox_index"]]), - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_c"]["xbox_index"]]), - self.filter_xbox_axis( - msg["axes"][self.ra_config["joint_de_pitch"]["xbox_index_right"]] - - msg["axes"][self.ra_config["joint_de_pitch"]["xbox_index_left"]] - ), - self.filter_xbox_axis( - 
msg["buttons"][self.ra_config["joint_de_roll"]["xbox_index_right"]] - - msg["buttons"][self.ra_config["joint_de_roll"]["xbox_index_left"]] - ), - self.ra_config["allen_key"]["multiplier"] * self.filter_xbox_button(msg["buttons"], "y", "a"), - self.ra_config["gripper"]["multiplier"] * self.filter_xbox_button(msg["buttons"], "b", "x"), - ] - - for i, name in enumerate(self.RA_NAMES): - if ra_slow_mode: - throttle_cmd.throttles[i] *= self.ra_config[name]["slow_mode_multiplier"] - if self.ra_config[name]["invert"]: - throttle_cmd.throttles[i] *= -1 - elif msg["type"] == "sa_arm_values": - throttle_cmd.throttles = [ - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_x"]["xbox_index"]]), - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_y"]["xbox_index"]]), - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_z"]["xbox_index"]]), - self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), - self.sa_config["sensor_actuator"]["multiplier"] - * self.filter_xbox_button(msg["buttons"], "right_bumper", "left_bumper"), - ] - - fast_mode_activated = msg["buttons"][self.xbox_mappings["a"]] or msg["buttons"][self.xbox_mappings["b"]] - if not fast_mode_activated: - for i, name in enumerate(SA_NAMES): - # When going up (vel > 0) with SA joint 2, we DON'T want slow mode. 
- if not (name == "sa_y" and throttle_cmd.throttles[i] > 0): - throttle_cmd.throttles[i] *= self.sa_config[name]["slow_mode_multiplier"] - publishers[2].publish(throttle_cmd) + self.publish_throttle(type=msg["type"], names=names, axes=msg["axes"], buttons=msg["buttons"]) def handle_joystick_message(self, msg): # Tiny deadzone so we can safely e-stop with dampen switch From c89e6d5b276c8743e4400a207c508838cc549954 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Wed, 17 Apr 2024 13:39:03 -0400 Subject: [PATCH 085/126] Update --- launch/basestation.launch | 9 ++++++--- src/teleoperation/backend/consumers.py | 6 +++--- src/teleoperation/frontend/package.json | 1 + 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/launch/basestation.launch b/launch/basestation.launch index b5039bcbe..47f7dd691 100644 --- a/launch/basestation.launch +++ b/launch/basestation.launch @@ -4,14 +4,17 @@ + + + - - - + + + diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index 653ab5bda..0cb13cdee 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -252,8 +252,8 @@ def to_velocity(self, input: int, joint_name: str, brushless: bool = True) -> fl ) def publish_ik(self, axes): - left_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["left_trigger"]]) - right_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["right_trigger"]]) + # left_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["left_trigger"]]) + # right_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["right_trigger"]]) ee_in_map = SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_e_link") ee_in_map.position[0] += ( @@ -263,7 +263,7 @@ def publish_ik(self, axes): self.ik_names["y"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_y"]]), ) - ee_in_map.position[2] += self.ik_names["z"] * (left_trigger - right_trigger) + # ee_in_map.position[2] += self.ik_names["z"] * (left_trigger - right_trigger) 
arm_ik_cmd = IK(target=PoseStamped( header=Header(stamp=rospy.Time.now(), frame_id="base_link"), diff --git a/src/teleoperation/frontend/package.json b/src/teleoperation/frontend/package.json index 8638e8b32..76d04eba6 100644 --- a/src/teleoperation/frontend/package.json +++ b/src/teleoperation/frontend/package.json @@ -20,6 +20,7 @@ "html2canvas": "^1.4.1", "leaflet": "^1.9.4", "quaternion-to-euler": "^0.5.0", + "three": "^0.163.0", "vue": "^3.3.4", "vue-flight-indicators": "^0.1.1", "vue-router": "^4.2.4", From 2af6befd842d74ab962d4df1a2351014fcc9cfb8 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Wed, 17 Apr 2024 13:53:22 -0400 Subject: [PATCH 086/126] Format --- scripts/debug_arm_ik.py | 8 +-- src/simulator/simulator.sensors.cpp | 2 +- src/teleoperation/backend/consumers.py | 74 +++++++++++--------------- 3 files changed, 36 insertions(+), 48 deletions(-) diff --git a/scripts/debug_arm_ik.py b/scripts/debug_arm_ik.py index 99aa9b7a1..8fb2e95e4 100755 --- a/scripts/debug_arm_ik.py +++ b/scripts/debug_arm_ik.py @@ -13,13 +13,13 @@ rospy.sleep(1.0) pub.publish( - IK( - target = PoseStamped( + IK( + target=PoseStamped( header=Header(stamp=rospy.Time.now(), frame_id="base_link"), pose=Pose( position=Point(x=0.8, y=1.0, z=0.5), orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0), - ) + ), ) ) ) @@ -31,7 +31,7 @@ def on_clicked_point(clicked_point: PointStamped): pose=Pose( position=clicked_point.point, orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0), - ) + ), ) ) pub.publish(ik) diff --git a/src/simulator/simulator.sensors.cpp b/src/simulator/simulator.sensors.cpp index 355b36541..1f1d1c4e5 100644 --- a/src/simulator/simulator.sensors.cpp +++ b/src/simulator/simulator.sensors.cpp @@ -245,7 +245,7 @@ namespace mrover { ControllerState armControllerState; sensor_msgs::JointState armJointState; - std::vector zeros = {0,0,0,0,0,0}; + std::vector zeros = {0, 0, 0, 0, 0, 0}; armJointState.header.stamp = ros::Time::now(); for (auto& linkName: {"arm_a_link", "arm_b_link", 
"arm_c_link", "arm_d_link", "arm_e_link"}) { armControllerState.name.emplace_back(armMsgToUrdf.backward(linkName).value()); diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index 0cb13cdee..40918bf87 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -95,7 +95,9 @@ def connect(self): # Subscribers self.pdb_sub = rospy.Subscriber("/pdb_data", PDLB, self.pdb_callback) - self.arm_moteus_sub = rospy.Subscriber("/arm_controller_data", ControllerState, self.arm_controller_callback) + self.arm_moteus_sub = rospy.Subscriber( + "/arm_controller_data", ControllerState, self.arm_controller_callback + ) self.arm_joint_sub = rospy.Subscriber("/arm_joint_data", JointState, self.arm_joint_callback) self.drive_moteus_sub = rospy.Subscriber( "/drive_controller_data", ControllerState, self.drive_controller_callback @@ -250,36 +252,37 @@ def to_velocity(self, input: int, joint_name: str, brushless: bool = True) -> fl * (self.brushed_motors[joint_name]["max_velocity"] - self.brushed_motors[joint_name]["min_velocity"]) / 2 ) - + def publish_ik(self, axes): # left_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["left_trigger"]]) # right_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["right_trigger"]]) ee_in_map = SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_e_link") - ee_in_map.position[0] += ( - self.ik_names["x"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_x"]]), - ) - ee_in_map.position[1] += ( - self.ik_names["y"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_y"]]), - ) - + ee_in_map.position[0] += (self.ik_names["x"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_x"]]),) + ee_in_map.position[1] += (self.ik_names["y"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_y"]]),) + # ee_in_map.position[2] += self.ik_names["z"] * (left_trigger - right_trigger) - arm_ik_cmd = IK(target=PoseStamped( + arm_ik_cmd = IK( + 
target=PoseStamped( header=Header(stamp=rospy.Time.now(), frame_id="base_link"), pose=Pose( position=Point(*ee_in_map.position), orientation=Quaternion(*ee_in_map.rotation.quaternion), - ) + ), + ) + ) + self.send( + text_data=json.dumps( + { + "type": "ik", + "target": { + "position": ee_in_map.position.tolist(), + "quaternion": ee_in_map.rotation.quaternion.tolist(), + }, + } ) ) - self.send(text_data=json.dumps({ - "type": "ik", - "target": { - "position": ee_in_map.position.tolist(), - "quaternion": ee_in_map.rotation.quaternion.tolist() - } - })) self.arm_ik_pub.publish(arm_ik_cmd) def publish_position(self, type, names, positions): @@ -301,21 +304,14 @@ def publish_velocity(self, type, names, axes, buttons): velocity_cmd.names = names if type == "cache_values": velocity_cmd.velocities = [ - self.sa_config["cache"]["multiplier"] - * self.filter_xbox_button(buttons, "right_bumper", "left_bumper") + self.sa_config["cache"]["multiplier"] * self.filter_xbox_button(buttons, "right_bumper", "left_bumper") ] self.cache_velocity_cmd_pub.publish(velocity_cmd) elif type == "sa_arm_values": velocity_cmd.velocities = [ - self.to_velocity( - self.filter_xbox_axis(axes[self.sa_config["sa_x"]["xbox_index"]]), "sa_x", False - ), - self.to_velocity( - self.filter_xbox_axis(axes[self.sa_config["sa_y"]["xbox_index"]]), "sa_y", False - ), - self.to_velocity( - self.filter_xbox_axis(axes[self.sa_config["sa_z"]["xbox_index"]]), "sa_z", True - ), + self.to_velocity(self.filter_xbox_axis(axes[self.sa_config["sa_x"]["xbox_index"]]), "sa_x", False), + self.to_velocity(self.filter_xbox_axis(axes[self.sa_config["sa_y"]["xbox_index"]]), "sa_y", False), + self.to_velocity(self.filter_xbox_axis(axes[self.sa_config["sa_z"]["xbox_index"]]), "sa_z", True), self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), self.sa_config["sensor_actuator"]["multiplier"] * self.filter_xbox_button(buttons, "right_bumper", "left_bumper"), @@ -323,15 +319,11 @@ def publish_velocity(self, 
type, names, axes, buttons): self.sa_velocity_cmd_pub.publish(velocity_cmd) elif type == "arm_values": velocity_cmd.velocities = [ - self.to_velocity( - self.filter_xbox_axis(axes[self.ra_config["joint_a"]["xbox_index"]]), "joint_a" - ), + self.to_velocity(self.filter_xbox_axis(axes[self.ra_config["joint_a"]["xbox_index"]]), "joint_a"), self.to_velocity( self.filter_xbox_axis(axes[self.ra_config["joint_b"]["xbox_index"]]), "joint_b", False ), - self.to_velocity( - self.filter_xbox_axis(axes[self.ra_config["joint_c"]["xbox_index"]]), "joint_c" - ), + self.to_velocity(self.filter_xbox_axis(axes[self.ra_config["joint_c"]["xbox_index"]]), "joint_c"), self.to_velocity( self.filter_xbox_axis(axes[self.ra_config["joint_de_pitch"]["xbox_index"]]), "joint_de_0" ), @@ -350,8 +342,7 @@ def publish_throttle(self, type, names, axes, buttons): throttle_cmd.names = names if type == "cache_values": throttle_cmd.throttles = [ - self.sa_config["cache"]["multiplier"] - * self.filter_xbox_button(buttons, "right_bumper", "left_bumper") + self.sa_config["cache"]["multiplier"] * self.filter_xbox_button(buttons, "right_bumper", "left_bumper") ] self.cache_throttle_cmd_pub.publish(throttle_cmd) elif type == "arm_values": @@ -381,7 +372,7 @@ def publish_throttle(self, type, names, axes, buttons): * self.filter_xbox_button(buttons, "right_bumper", "left_bumper"), ] self.sa_throttle_cmd_pub.publish(throttle_cmd) - + def handle_controls_message(self, msg): names = ["joint_b", "joint_c", "joint_de_pitch", "joint_de_roll"] if msg["type"] == "sa_arm_values": @@ -798,12 +789,9 @@ def flight_attitude_listener(self): self.send(text_data=json.dumps({"type": "flight_attitude", "pitch": pitch, "roll": roll})) rate.sleep() - + def arm_joint_callback(self, msg): - self.send(text_data=json.dumps({ - "type": "fk", - "positions": msg.position - })) + self.send(text_data=json.dumps({"type": "fk", "positions": msg.position})) def science_spectral_callback(self, msg): self.send( From 
049d49b8a78584c1229a553224cf63d9ee27130f Mon Sep 17 00:00:00 2001 From: qhdwight Date: Wed, 17 Apr 2024 14:35:23 -0400 Subject: [PATCH 087/126] Update --- src/simulator/simulator.render.cpp | 13 ++++ .../arm_controller/arm_controller.cpp | 20 +++--- src/teleoperation/arm_controller/pch.hpp | 1 + src/teleoperation/backend/consumers.py | 69 +++++++++---------- 4 files changed, 57 insertions(+), 46 deletions(-) diff --git a/src/simulator/simulator.render.cpp b/src/simulator/simulator.render.cpp index ce2bf2247..9f725cc11 100644 --- a/src/simulator/simulator.render.cpp +++ b/src/simulator/simulator.render.cpp @@ -208,6 +208,19 @@ namespace mrover { mWindow = GlfwPointer{w, h, WINDOW_NAME, nullptr, nullptr}; NODELET_INFO_STREAM(std::format("Created window of size: {}x{}", w, h)); + if (cv::Mat logo = imread(std::filesystem::path{std::source_location::current().file_name()}.parent_path() / "mrover_logo.png", cv::IMREAD_UNCHANGED); + logo.type() == CV_8UC4) { + cvtColor(logo, logo, cv::COLOR_BGRA2RGBA); + GLFWimage logoImage{ + .width = logo.cols, + .height = logo.rows, + .pixels = logo.data, + }; + glfwSetWindowIcon(mWindow.get(), 1, &logoImage); + } else { + ROS_WARN_STREAM("Failed to load logo image"); + } + if (glfwRawMouseMotionSupported()) glfwSetInputMode(mWindow.get(), GLFW_RAW_MOUSE_MOTION, GLFW_TRUE); glfwSetWindowUserPointer(mWindow.get(), this); diff --git a/src/teleoperation/arm_controller/arm_controller.cpp b/src/teleoperation/arm_controller/arm_controller.cpp index b0d7d23af..76414e499 100644 --- a/src/teleoperation/arm_controller/arm_controller.cpp +++ b/src/teleoperation/arm_controller/arm_controller.cpp @@ -16,19 +16,19 @@ namespace mrover { Position positions; positions.names = {"joint_a", "joint_b", "joint_c", "joint_de_pitch", "joint_de_roll"}; positions.positions.resize(positions.names.size()); - SE3d target_frame_to_arm_b_static; + SE3d targetFrameToArmBaseLink; try { - target_frame_to_arm_b_static = SE3Conversions::fromTfTree(mTfBuffer, 
ik_target.target.header.frame_id, "arm_base_link"); - } catch (...) { - ROS_WARN_THROTTLE(1, "Failed to resolve information about target frame"); + targetFrameToArmBaseLink = SE3Conversions::fromTfTree(mTfBuffer, ik_target.target.header.frame_id, "arm_base_link"); + } catch (tf2::TransformException const& exception) { + ROS_WARN_STREAM_THROTTLE(1, std::format("Failed to get transform from {} to arm_base_link: {}", ik_target.target.header.frame_id, exception.what())); return; } - Eigen::Vector4d target{ik_target.target.pose.position.x, ik_target.target.pose.position.y, ik_target.target.pose.position.z, 1}; - Eigen::Vector4d target_in_arm_b_static = target_frame_to_arm_b_static.transform() * target; - double x = target_in_arm_b_static.x() - END_EFFECTOR_LENGTH; // shift back by the length of the end effector - double z = target_in_arm_b_static.z(); - double y = target_in_arm_b_static.y(); - SE3Conversions::pushToTfTree(mTfBroadcaster, "arm_target", "arm_base_link", target_frame_to_arm_b_static); + SE3d endEffectorInTarget{{ik_target.target.pose.position.x, ik_target.target.pose.position.y, ik_target.target.pose.position.z}, SO3d::Identity()}; + SE3d endEffectorInArmBaseLink = targetFrameToArmBaseLink * endEffectorInTarget; + double x = endEffectorInArmBaseLink.translation().x() - END_EFFECTOR_LENGTH; // shift back by the length of the end effector + double y = endEffectorInArmBaseLink.translation().y(); + double z = endEffectorInArmBaseLink.translation().z(); + SE3Conversions::pushToTfTree(mTfBroadcaster, "arm_target", "arm_base_link", targetFrameToArmBaseLink); double gamma = 0; double x3 = x - LINK_DE * std::cos(gamma); diff --git a/src/teleoperation/arm_controller/pch.hpp b/src/teleoperation/arm_controller/pch.hpp index 2f68bad08..104f5832c 100644 --- a/src/teleoperation/arm_controller/pch.hpp +++ b/src/teleoperation/arm_controller/pch.hpp @@ -2,6 +2,7 @@ #include #include +#include #include diff --git a/src/teleoperation/backend/consumers.py 
b/src/teleoperation/backend/consumers.py index 40918bf87..f5d3cfa34 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -1,17 +1,17 @@ import csv -from datetime import datetime import json import os -import pytz -from math import copysign -from math import pi -from tf.transformations import euler_from_quaternion import threading +from datetime import datetime +from math import copysign, pi +import pytz from channels.generic.websocket import JsonWebsocketConsumer + import rospy import tf2_ros -import cv2 +from backend.models import AutonWaypoint, BasicWaypoint +from geometry_msgs.msg import Twist, Pose, PoseStamped, Point, Quaternion # from cv_bridge import CvBridge from mrover.msg import ( @@ -33,16 +33,12 @@ HeaterData, ) from mrover.srv import EnableAuton, AdjustMotor, ChangeCameras, CapturePanorama -from sensor_msgs.msg import JointState, NavSatFix, Temperature, RelativeHumidity, Image +from sensor_msgs.msg import JointState, NavSatFix, Temperature, RelativeHumidity from std_msgs.msg import String, Header from std_srvs.srv import SetBool, Trigger -from geometry_msgs.msg import Twist, Pose, PoseStamped, Point, Quaternion - +from tf.transformations import euler_from_quaternion from util.SE3 import SE3 -from backend.models import AutonWaypoint, BasicWaypoint - - DEFAULT_ARM_DEADZONE = 0.15 @@ -215,16 +211,18 @@ def receive(self, text_data): except Exception as e: rospy.logerr(e) + @staticmethod def filter_xbox_axis( - self, - value: int, + value: float, deadzone_threshold: float = DEFAULT_ARM_DEADZONE, quad_control: bool = False, ) -> float: - deadzoned_val = deadzone(value, deadzone_threshold) - return quadratic(deadzoned_val) if quad_control else deadzoned_val + value = deadzone(value, deadzone_threshold) + if quad_control: + value = quadratic(value) + return value - def filter_xbox_button(self, button_array: "List[int]", pos_button: str, neg_button: str) -> float: + def filter_xbox_button(self, button_array: 
list[float], pos_button: str, neg_button: str) -> float: """ Applies various filtering functions to an axis for controlling the arm :return: Return -1, 0, or 1 depending on what buttons are being pressed @@ -253,25 +251,13 @@ def to_velocity(self, input: int, joint_name: str, brushless: bool = True) -> fl / 2 ) - def publish_ik(self, axes): - # left_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["left_trigger"]]) - # right_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["right_trigger"]]) - ee_in_map = SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_e_link") + def publish_ik(self, axes: list[float], buttons: list[float]) -> None: + ee_in_map = SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_d_link") - ee_in_map.position[0] += (self.ik_names["x"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_x"]]),) - ee_in_map.position[1] += (self.ik_names["y"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_y"]]),) + ee_in_map.position[0] += self.ik_names["x"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_x"]]) + ee_in_map.position[1] += self.ik_names["y"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_y"]]) + ee_in_map.position[2] += self.ik_names["z"] * self.filter_xbox_button(buttons, "right_trigger", "left_trigger") - # ee_in_map.position[2] += self.ik_names["z"] * (left_trigger - right_trigger) - - arm_ik_cmd = IK( - target=PoseStamped( - header=Header(stamp=rospy.Time.now(), frame_id="base_link"), - pose=Pose( - position=Point(*ee_in_map.position), - orientation=Quaternion(*ee_in_map.rotation.quaternion), - ), - ) - ) self.send( text_data=json.dumps( { @@ -283,7 +269,18 @@ def publish_ik(self, axes): } ) ) - self.arm_ik_pub.publish(arm_ik_cmd) + + self.arm_ik_pub.publish( + IK( + target=PoseStamped( + header=Header(stamp=rospy.Time.now(), frame_id="base_link"), + pose=Pose( + position=Point(*ee_in_map.position), + orientation=Quaternion(*ee_in_map.rotation.quaternion), + ), + ) + ) + ) def publish_position(self, 
type, names, positions): position_cmd = Position( @@ -381,7 +378,7 @@ def handle_controls_message(self, msg): names = ["cache"] if msg["arm_mode"] == "ik": - self.publish_ik(axes=msg["axes"]) + self.publish_ik(msg["axes"], msg["buttons"]) elif msg["arm_mode"] == "position": self.publish_position(type=msg["type"], names=names, positions=msg["positions"]) From b26ea44aa64031b1eb903d23413370531a9008eb Mon Sep 17 00:00:00 2001 From: qhdwight Date: Wed, 17 Apr 2024 21:06:33 -0400 Subject: [PATCH 088/126] Fix arm base link --- src/teleoperation/arm_controller/arm_controller.cpp | 2 +- urdf/rover/rover.urdf.xacro | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/teleoperation/arm_controller/arm_controller.cpp b/src/teleoperation/arm_controller/arm_controller.cpp index 76414e499..dbbe35771 100644 --- a/src/teleoperation/arm_controller/arm_controller.cpp +++ b/src/teleoperation/arm_controller/arm_controller.cpp @@ -28,7 +28,7 @@ namespace mrover { double x = endEffectorInArmBaseLink.translation().x() - END_EFFECTOR_LENGTH; // shift back by the length of the end effector double y = endEffectorInArmBaseLink.translation().y(); double z = endEffectorInArmBaseLink.translation().z(); - SE3Conversions::pushToTfTree(mTfBroadcaster, "arm_target", "arm_base_link", targetFrameToArmBaseLink); + SE3Conversions::pushToTfTree(mTfBroadcaster, "arm_target", "arm_base_link", endEffectorInArmBaseLink); double gamma = 0; double x3 = x - LINK_DE * std::cos(gamma); diff --git a/urdf/rover/rover.urdf.xacro b/urdf/rover/rover.urdf.xacro index 3aedbbc29..f79b800bb 100644 --- a/urdf/rover/rover.urdf.xacro +++ b/urdf/rover/rover.urdf.xacro @@ -204,7 +204,7 @@ - + From 33d430cfc96116d4c9cf6e16aba1b4082b74958f Mon Sep 17 00:00:00 2001 From: qhdwight Date: Thu, 18 Apr 2024 15:37:53 -0400 Subject: [PATCH 089/126] Import --- src/teleoperation/backend/consumers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/teleoperation/backend/consumers.py 
b/src/teleoperation/backend/consumers.py index 187e87794..d0e1747d3 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -12,6 +12,7 @@ import tf2_ros from backend.models import AutonWaypoint, BasicWaypoint from geometry_msgs.msg import Twist, Pose, Point, Quaternion, Vector3, PoseStamped + # from cv_bridge import CvBridge from mrover.msg import ( PDLB, @@ -32,7 +33,7 @@ HeaterData, ) from mrover.srv import EnableAuton, AdjustMotor, ChangeCameras, CapturePanorama -from sensor_msgs.msg import NavSatFix, Temperature, RelativeHumidity +from sensor_msgs.msg import NavSatFix, Temperature, RelativeHumidity, JointState from std_msgs.msg import String, Header from std_srvs.srv import SetBool, Trigger from tf.transformations import euler_from_quaternion From 24600dde93d29888cc72d53e1e72a42bc2a506c0 Mon Sep 17 00:00:00 2001 From: nehakankanala Date: Thu, 18 Apr 2024 19:13:47 -0400 Subject: [PATCH 090/126] set default to be using only one gps --- config/localization.yaml | 2 +- launch/localization.launch | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/localization.yaml b/config/localization.yaml index 55dc1b47a..698a69056 100644 --- a/config/localization.yaml +++ b/config/localization.yaml @@ -8,4 +8,4 @@ gps_linearization: reference_point_longitude: -83.7096706 reference_point_altitude: 234.1 reference_heading: 90.0 - use_both_gps: true + use_both_gps: false diff --git a/launch/localization.launch b/launch/localization.launch index 33acfa8ff..45803a5a9 100644 --- a/launch/localization.launch +++ b/launch/localization.launch @@ -2,7 +2,7 @@ - + From a2b143644c61e9dc8eddaf20eed99eed41e7538e Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Sat, 20 Apr 2024 15:14:11 -0400 Subject: [PATCH 091/126] Fix arducam, build on CUDA 11.4 and G++9, add udev rules to ansible --- .../jetson_services/files/99-usb-serial.rules | 2 -- .../rover-base-station-gstreamer-0.service | 9 --------- 
.../rover-base-station-gstreamer-0.timer | 5 ----- .../rover-base-station-gstreamer-1.service | 9 --------- .../rover-base-station-gstreamer-1.timer | 5 ----- .../rover-base-station-gstreamer-2.service | 9 --------- .../rover-base-station-gstreamer-2.timer | 5 ----- .../rover-base-station-gstreamer-3.service | 9 --------- .../rover-base-station-gstreamer-3.timer | 5 ----- .../files/rules/99-usb-serial.rules | 3 +++ ansible/roles/jetson_services/tasks/main.yml | 14 +++++++++++++ config/esw.yaml | 20 +++++++++---------- config/localization.yaml | 2 +- launch/esw_base.launch | 8 ++++---- launch/localization.launch | 2 +- package.xml | 1 + src/perception/usb_camera/usb_camera.cpp | 6 ++++-- src/preload/bits/allocator.h | 8 ++++++++ 18 files changed, 45 insertions(+), 77 deletions(-) delete mode 100644 ansible/roles/jetson_services/files/99-usb-serial.rules delete mode 100644 ansible/roles/jetson_services/files/rover-base-station-gstreamer-0.service delete mode 100644 ansible/roles/jetson_services/files/rover-base-station-gstreamer-0.timer delete mode 100644 ansible/roles/jetson_services/files/rover-base-station-gstreamer-1.service delete mode 100644 ansible/roles/jetson_services/files/rover-base-station-gstreamer-1.timer delete mode 100644 ansible/roles/jetson_services/files/rover-base-station-gstreamer-2.service delete mode 100644 ansible/roles/jetson_services/files/rover-base-station-gstreamer-2.timer delete mode 100644 ansible/roles/jetson_services/files/rover-base-station-gstreamer-3.service delete mode 100644 ansible/roles/jetson_services/files/rover-base-station-gstreamer-3.timer create mode 100644 ansible/roles/jetson_services/files/rules/99-usb-serial.rules diff --git a/ansible/roles/jetson_services/files/99-usb-serial.rules b/ansible/roles/jetson_services/files/99-usb-serial.rules deleted file mode 100644 index dfdc52298..000000000 --- a/ansible/roles/jetson_services/files/99-usb-serial.rules +++ /dev/null @@ -1,2 +0,0 @@ -SUBSYSTEM=="tty", 
ATTRS{idVendor}=="0483", ATTRS{idProduct}=="374b", SYMLINK+="raman" -SUBSYSTEM=="tty", ATTRS{idVendor}=="2341", ATTRS{idProduct}=="0043", SYMLINK+="auton_led" diff --git a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-0.service b/ansible/roles/jetson_services/files/rover-base-station-gstreamer-0.service deleted file mode 100644 index 7480d3f85..000000000 --- a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-0.service +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Base Station Gstreamer Service 0 - -[Service] -Environment="DISPLAY=:1" -Environment="XAUTHORITY=/home/mrover/.Xauthority" -User=mrover -ExecStart=/usr/bin/gst-launch-1.0 udpsrc port=5000 ! "application/x-rtp, encoding-name=(string)H264, payload=96" ! rtph264depay ! decodebin ! videoconvert ! autovideosink -Restart=on-failure \ No newline at end of file diff --git a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-0.timer b/ansible/roles/jetson_services/files/rover-base-station-gstreamer-0.timer deleted file mode 100644 index 402352bf1..000000000 --- a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-0.timer +++ /dev/null @@ -1,5 +0,0 @@ -[Timer] -OnBootSec=20s - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-1.service b/ansible/roles/jetson_services/files/rover-base-station-gstreamer-1.service deleted file mode 100644 index 1032633da..000000000 --- a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-1.service +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Base Station Gstreamer Service 1 - -[Service] -Environment="DISPLAY=:1" -Environment="XAUTHORITY=/home/mrover/.Xauthority" -User=mrover -ExecStart=/usr/bin/gst-launch-1.0 udpsrc port=5001 ! "application/x-rtp, encoding-name=(string)H264, payload=96" ! rtph264depay ! decodebin ! videoconvert ! 
autovideosink -Restart=on-failure \ No newline at end of file diff --git a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-1.timer b/ansible/roles/jetson_services/files/rover-base-station-gstreamer-1.timer deleted file mode 100644 index 402352bf1..000000000 --- a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-1.timer +++ /dev/null @@ -1,5 +0,0 @@ -[Timer] -OnBootSec=20s - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-2.service b/ansible/roles/jetson_services/files/rover-base-station-gstreamer-2.service deleted file mode 100644 index deb6543dc..000000000 --- a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-2.service +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Base Station Gstreamer Service 2 - -[Service] -Environment="DISPLAY=:1" -Environment="XAUTHORITY=/home/mrover/.Xauthority" -User=mrover -ExecStart=/usr/bin/gst-launch-1.0 udpsrc port=5002 ! "application/x-rtp, encoding-name=(string)H264, payload=96" ! rtph264depay ! decodebin ! videoconvert ! 
autovideosink -Restart=on-failure \ No newline at end of file diff --git a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-2.timer b/ansible/roles/jetson_services/files/rover-base-station-gstreamer-2.timer deleted file mode 100644 index 402352bf1..000000000 --- a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-2.timer +++ /dev/null @@ -1,5 +0,0 @@ -[Timer] -OnBootSec=20s - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-3.service b/ansible/roles/jetson_services/files/rover-base-station-gstreamer-3.service deleted file mode 100644 index 06be0fc58..000000000 --- a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-3.service +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Base Station Gstreamer Service 3 - -[Service] -Environment="DISPLAY=:1" -Environment="XAUTHORITY=/home/mrover/.Xauthority" -User=mrover -ExecStart=/usr/bin/gst-launch-1.0 udpsrc port=5003 ! "application/x-rtp, encoding-name=(string)H264, payload=96" ! rtph264depay ! decodebin ! videoconvert ! 
autovideosink -Restart=on-failure \ No newline at end of file diff --git a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-3.timer b/ansible/roles/jetson_services/files/rover-base-station-gstreamer-3.timer deleted file mode 100644 index 402352bf1..000000000 --- a/ansible/roles/jetson_services/files/rover-base-station-gstreamer-3.timer +++ /dev/null @@ -1,5 +0,0 @@ -[Timer] -OnBootSec=20s - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/ansible/roles/jetson_services/files/rules/99-usb-serial.rules b/ansible/roles/jetson_services/files/rules/99-usb-serial.rules new file mode 100644 index 000000000..99fcb6258 --- /dev/null +++ b/ansible/roles/jetson_services/files/rules/99-usb-serial.rules @@ -0,0 +1,3 @@ +SUBSYSTEM=="tty", ATTRS{idVendor}=="1546", ATTRS{idProduct}=="01a9", SYMLINK+="gps_%n" +SUBSYSTEM=="tty", ATTRS{idVendor}=="2341", ATTRS{idProduct}=="0058", SYMLINK+="imu" +SUBSYSTEM=="video4linux", KERNEL=="video*", ATTRS{idVendor}=="1bcf", ATTRS{idProduct}=="0b12", ATTR{index}=="0", GROUP="video", SYMLINK+="arducam" diff --git a/ansible/roles/jetson_services/tasks/main.yml b/ansible/roles/jetson_services/tasks/main.yml index e69de29bb..74c048ea2 100644 --- a/ansible/roles/jetson_services/tasks/main.yml +++ b/ansible/roles/jetson_services/tasks/main.yml @@ -0,0 +1,14 @@ +- name: USB Rules + become: true + synchronize: + src: files/rules/ + dest: /etc/udev/rules.d + recursive: true + +- name: udev Reload Rules + become: true + command: udevadm control --reload-rules + +- name: udev TTY Trigger + become: true + command: udevadm trigger --subsystem-match=tty diff --git a/config/esw.yaml b/config/esw.yaml index 4e38291be..fe5806ccf 100644 --- a/config/esw.yaml +++ b/config/esw.yaml @@ -4,7 +4,7 @@ right_gps_driver: frame_id: "base_link" left_gps_driver: - port: "/dev/gps" + port: "/dev/gps_0" baud: 38400 frame_id: "base_link" @@ -129,11 +129,10 @@ brushless_motors: min_velocity: -70.0 max_velocity: 70.0 joint_a: - 
velocity_multiplier: 1.0 - min_velocity: -100.0 - max_velocity: 100.0 - is_linear: true - rad_to_meters_ratio: 1236.8475 # 5.0 = 5 motor radians -> 1 meter + # 1236.8475 rad/meter, or 196.850393476 rev/meter, and we want a max vel of 40 meter/s. + # gear ratio is currently at 0.005080 revolutions = 1 meter + min_velocity: -0.05 # this means -0.10 meters per second. + max_velocity: 0.05 limit_0_present: true limit_1_present: true limit_0_enabled: true @@ -151,11 +150,10 @@ brushless_motors: max_forward_pos: 0.4 max_backward_pos: 0.0 joint_c: - velocity_multiplier: -1.0 - min_velocity: -0.6 # gear ratio: 484:1 - max_velocity: 0.6 - min_position: 0.0 - max_position: 3.8 # 220 degrees of motion + min_velocity: -0.04 # in terms of output + max_velocity: 0.04 # max output shaft speed: 5 rpm (for now) + min_position: -0.125 + max_position: 0.30 # 220 degrees of motion is the entire range joint_de_0: velocity_multiplier: 1.0 min_velocity: -67.0 diff --git a/config/localization.yaml b/config/localization.yaml index 55dc1b47a..698a69056 100644 --- a/config/localization.yaml +++ b/config/localization.yaml @@ -8,4 +8,4 @@ gps_linearization: reference_point_longitude: -83.7096706 reference_point_altitude: 234.1 reference_heading: 90.0 - use_both_gps: true + use_both_gps: false diff --git a/launch/esw_base.launch b/launch/esw_base.launch index 8e97e2ece..5eb8b0660 100644 --- a/launch/esw_base.launch +++ b/launch/esw_base.launch @@ -2,22 +2,22 @@ - - + --> - diff --git a/launch/localization.launch b/launch/localization.launch index 33acfa8ff..45803a5a9 100644 --- a/launch/localization.launch +++ b/launch/localization.launch @@ -2,7 +2,7 @@ - + diff --git a/package.xml b/package.xml index 5a42eb6a6..9ab172f38 100644 --- a/package.xml +++ b/package.xml @@ -64,6 +64,7 @@ rviz_imu_plugin robot_localization + rtcm_msgs diff --git a/src/perception/usb_camera/usb_camera.cpp b/src/perception/usb_camera/usb_camera.cpp index e83d6cac2..02bb42c4a 100644 --- 
a/src/perception/usb_camera/usb_camera.cpp +++ b/src/perception/usb_camera/usb_camera.cpp @@ -42,7 +42,7 @@ namespace mrover { mCamInfoPub = mNh.advertise(cameraInfoTopicName, 1); std::string captureFormat = std::format("video/x-raw,format=YUY2,width={},height={},framerate={}/1", width, height, framerate); - std::string convertFormat = "video/x-raw,format=BGRA"; + std::string convertFormat = "video/x-raw,format=I420"; std::string gstString = std::format("v4l2src device={} ! {} ! videoconvert ! {} ! appsink", device, captureFormat, convertFormat); NODELET_INFO_STREAM(std::format("GStreamer string: {}", gstString)); cv::VideoCapture capture{gstString, cv::CAP_GSTREAMER}; @@ -57,7 +57,9 @@ namespace mrover { if (mImgPub.getNumSubscribers()) { auto imageMessage = boost::make_shared(); - fillImageMessage(frame, imageMessage); + cv::Mat bgra; + cv::cvtColor(frame, bgra, cv::COLOR_YUV2BGRA_I420); + fillImageMessage(bgra, imageMessage); imageMessage->header.frame_id = "long_range_cam_frame"; imageMessage->header.stamp = ros::Time::now(); mImgPub.publish(imageMessage); diff --git a/src/preload/bits/allocator.h b/src/preload/bits/allocator.h index a58775c45..815b7f850 100644 --- a/src/preload/bits/allocator.h +++ b/src/preload/bits/allocator.h @@ -8,6 +8,8 @@ #if defined(__linux__) && defined(__GNUC__) +#if (__GNUC__ == 13) + // Allocators -*- C++ -*- // Copyright (C) 2001-2023 Free Software Foundation, Inc. 
@@ -285,4 +287,10 @@ namespace std _GLIBCXX_VISIBILITY(default) { #endif +#else + +#include_next + +#endif + #endif From 6e357546c282f7a9935db57894e7eb01da40eaa9 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 20 Apr 2024 15:30:55 -0400 Subject: [PATCH 092/126] Avoid uneeded copy in usb camera, only replcae bits allocator if on gcc 13 --- src/esw/drive_bridge/main.cpp | 3 ++- src/perception/usb_camera/usb_camera.cpp | 7 ++++--- src/preload/bits/allocator.h | 6 +++++- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/esw/drive_bridge/main.cpp b/src/esw/drive_bridge/main.cpp index 9e836e462..afa2092ec 100644 --- a/src/esw/drive_bridge/main.cpp +++ b/src/esw/drive_bridge/main.cpp @@ -43,7 +43,8 @@ auto main(int argc, char** argv) -> int { } void moveDrive(geometry_msgs::Twist::ConstPtr const& msg) { - // See 11.5.1 in "Controls Engineering in the FIRST Robotics Competition" for the math + // See 13.3.1.4 in "Modern Robotics" for the math + // Link: https://hades.mech.northwestern.edu/images/7/7f/MR.pdf auto forward = MetersPerSecond{msg->linear.x}; auto turn = RadiansPerSecond{msg->angular.z}; // TODO(quintin) Don't ask me to explain perfectly why we need to cancel out a meters unit in the numerator diff --git a/src/perception/usb_camera/usb_camera.cpp b/src/perception/usb_camera/usb_camera.cpp index e83d6cac2..2452b638c 100644 --- a/src/perception/usb_camera/usb_camera.cpp +++ b/src/perception/usb_camera/usb_camera.cpp @@ -42,8 +42,7 @@ namespace mrover { mCamInfoPub = mNh.advertise(cameraInfoTopicName, 1); std::string captureFormat = std::format("video/x-raw,format=YUY2,width={},height={},framerate={}/1", width, height, framerate); - std::string convertFormat = "video/x-raw,format=BGRA"; - std::string gstString = std::format("v4l2src device={} ! {} ! videoconvert ! {} ! appsink", device, captureFormat, convertFormat); + std::string gstString = std::format("v4l2src device={} ! {} ! 
appsink", device, captureFormat); NODELET_INFO_STREAM(std::format("GStreamer string: {}", gstString)); cv::VideoCapture capture{gstString, cv::CAP_GSTREAMER}; if (!capture.isOpened()) throw std::runtime_error{"USB camera failed to open"}; @@ -57,7 +56,9 @@ namespace mrover { if (mImgPub.getNumSubscribers()) { auto imageMessage = boost::make_shared(); - fillImageMessage(frame, imageMessage); + cv::Mat bgra; + cvtColor(frame, bgra, cv::COLOR_YUV2BGRA_YUY2); + fillImageMessage(bgra, imageMessage); imageMessage->header.frame_id = "long_range_cam_frame"; imageMessage->header.stamp = ros::Time::now(); mImgPub.publish(imageMessage); diff --git a/src/preload/bits/allocator.h b/src/preload/bits/allocator.h index a58775c45..5d5595bbd 100644 --- a/src/preload/bits/allocator.h +++ b/src/preload/bits/allocator.h @@ -6,7 +6,7 @@ // C++ should have never broke this backwards compatibility! // How silly! -#if defined(__linux__) && defined(__GNUC__) +#if defined(__linux__) && defined(__GNUC__) && (__GNUC__ == 13) // Allocators -*- C++ -*- @@ -285,4 +285,8 @@ namespace std _GLIBCXX_VISIBILITY(default) { #endif +#else + +#include_next + #endif From 2fa3e85c511a628a332d7e586aaf1015460a932a Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 20 Apr 2024 15:32:49 -0400 Subject: [PATCH 093/126] Avoid uneeded copy in usb camera --- src/perception/usb_camera/usb_camera.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/perception/usb_camera/usb_camera.cpp b/src/perception/usb_camera/usb_camera.cpp index 02bb42c4a..2452b638c 100644 --- a/src/perception/usb_camera/usb_camera.cpp +++ b/src/perception/usb_camera/usb_camera.cpp @@ -42,8 +42,7 @@ namespace mrover { mCamInfoPub = mNh.advertise(cameraInfoTopicName, 1); std::string captureFormat = std::format("video/x-raw,format=YUY2,width={},height={},framerate={}/1", width, height, framerate); - std::string convertFormat = "video/x-raw,format=I420"; - std::string gstString = std::format("v4l2src device={} ! {} ! 
videoconvert ! {} ! appsink", device, captureFormat, convertFormat); + std::string gstString = std::format("v4l2src device={} ! {} ! appsink", device, captureFormat); NODELET_INFO_STREAM(std::format("GStreamer string: {}", gstString)); cv::VideoCapture capture{gstString, cv::CAP_GSTREAMER}; if (!capture.isOpened()) throw std::runtime_error{"USB camera failed to open"}; @@ -58,7 +57,7 @@ namespace mrover { if (mImgPub.getNumSubscribers()) { auto imageMessage = boost::make_shared(); cv::Mat bgra; - cv::cvtColor(frame, bgra, cv::COLOR_YUV2BGRA_I420); + cvtColor(frame, bgra, cv::COLOR_YUV2BGRA_YUY2); fillImageMessage(bgra, imageMessage); imageMessage->header.frame_id = "long_range_cam_frame"; imageMessage->header.stamp = ros::Time::now(); From c5174be310cabf943e712502039e5a354d543401 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 20 Apr 2024 15:35:11 -0400 Subject: [PATCH 094/126] Lower speed --- config/esw.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/config/esw.yaml b/config/esw.yaml index f0a78c30f..3c877b92a 100644 --- a/config/esw.yaml +++ b/config/esw.yaml @@ -122,10 +122,10 @@ brushless_motors: max_velocity: 20.0 max_torque: 5.0 joint_a: - # 1236.8475 rad/meter, or 196.850393476 rev/meter, and we want a max vel of 40 meter/s. + # 1236.8475 rad/meter, or 196.850393476 rev/meter, and we want a max vel of 40 meter/s. # gear ratio is currently at 0.005080 revolutions = 1 meter - min_velocity: -0.10 # this means -0.10 meters per second. - max_velocity: 0.10 + min_velocity: -0.05 # this means -0.10 meters per second. 
+ max_velocity: 0.05 limit_0_present: true limit_1_present: true limit_0_enabled: true @@ -144,8 +144,8 @@ brushless_motors: max_backward_pos: 0.0 max_torque: 20.0 joint_c: - min_velocity: -0.08 # in terms of output - max_velocity: 0.08 # max output shaft speed: 5 rpm (for now) + min_velocity: -0.04 # in terms of output + max_velocity: 0.04 # max output shaft speed: 5 rpm (for now) min_position: -0.125 max_position: 0.30 # 220 degrees of motion is the entire range max_torque: 20.0 @@ -595,4 +595,4 @@ auton_led_driver: baud: 115200 science: - shutoff_temp: 50.0f \ No newline at end of file + shutoff_temp: 50.0f From ac5fe3bde0992d2fddc64aa6383e6ce3c4cf79cb Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 20 Apr 2024 15:41:07 -0400 Subject: [PATCH 095/126] Fix catkin profile --- ansible/roles/build/files/profiles/ci/config.yaml | 4 ++-- ansible/roles/build/files/profiles/debug/config.yaml | 4 ++-- ansible/roles/build/files/profiles/release/config.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ansible/roles/build/files/profiles/ci/config.yaml b/ansible/roles/build/files/profiles/ci/config.yaml index d14216f40..c0346527b 100644 --- a/ansible/roles/build/files/profiles/ci/config.yaml +++ b/ansible/roles/build/files/profiles/ci/config.yaml @@ -5,9 +5,9 @@ catkin_make_args: [ ] cmake_args: - -DCMAKE_BUILD_TYPE=Release - -DCMAKE_C_COMPILER=clang-16 - - -DCMAKE_CXX_FLAGS=-pipe + - -DCMAKE_CXX_FLAGS=-pipe -fgnuc-version=13.1.0 - -DCMAKE_CXX_COMPILER=clang++-16 - - -DCMAKE_CUDA_HOST_COMPILER=clang++-16 + - -DCMAKE_CUDA_HOST_COMPILER=g++-9 - -DMROVER_IS_CI=ON - -Wno-dev devel_layout: linked diff --git a/ansible/roles/build/files/profiles/debug/config.yaml b/ansible/roles/build/files/profiles/debug/config.yaml index 71005790a..434ae5ae9 100644 --- a/ansible/roles/build/files/profiles/debug/config.yaml +++ b/ansible/roles/build/files/profiles/debug/config.yaml @@ -5,11 +5,11 @@ catkin_make_args: [ ] cmake_args: - -DCMAKE_BUILD_TYPE=Debug - 
-DCMAKE_C_COMPILER=clang-16 - - -DCMAKE_CXX_FLAGS=-pipe + - -DCMAKE_CXX_FLAGS=-pipe -fgnuc-version=13.1.0 - -DCMAKE_CXX_COMPILER=clang++-16 - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache - - -DCMAKE_CUDA_HOST_COMPILER=clang++-16 + - -DCMAKE_CUDA_HOST_COMPILER=g++-9 - -Wno-dev devel_layout: linked devel_space: devel diff --git a/ansible/roles/build/files/profiles/release/config.yaml b/ansible/roles/build/files/profiles/release/config.yaml index f0c29e538..ef35c5cb7 100644 --- a/ansible/roles/build/files/profiles/release/config.yaml +++ b/ansible/roles/build/files/profiles/release/config.yaml @@ -5,11 +5,11 @@ catkin_make_args: [ ] cmake_args: - -DCMAKE_BUILD_TYPE=Release - -DCMAKE_C_COMPILER=clang-16 - - -DCMAKE_CXX_FLAGS=-march=native -pipe + - -DCMAKE_CXX_FLAGS=-march=native -pipe -fgnuc-version=13.1.0 - -DCMAKE_CXX_COMPILER=clang++-16 - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache - - -DCMAKE_CUDA_HOST_COMPILER=clang++-16 + - -DCMAKE_CUDA_HOST_COMPILER=g++-9 - -DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON - -Wno-dev devel_layout: linked From ddb39f451af9991afc61ea3c1cb84ef37b22a179 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 20 Apr 2024 16:55:46 -0400 Subject: [PATCH 096/126] Add arm automation init --- CMakeLists.txt | 6 ++ action/ArmAction.action | 3 + config/teleop.yaml | 13 +++- setup.py | 1 + src/teleoperation/arm_automation/__init__.py | 0 .../arm_automation/arm_automation.py | 60 +++++++++++++++++++ src/teleoperation/backend/consumers.py | 29 ++++++--- 7 files changed, 102 insertions(+), 10 deletions(-) create mode 100644 action/ArmAction.action create mode 100644 src/teleoperation/arm_automation/__init__.py create mode 100755 src/teleoperation/arm_automation/arm_automation.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c68974f1..b363cfdf1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -67,15 +67,19 @@ set(MROVER_ROS_DEPENDENCIES tf2 tf2_ros tf2_geometry_msgs + actionlib_msgs ) 
extract_filenames(msg/*.msg MROVER_MESSAGE_FILES) extract_filenames(srv/*.srv MROVER_SERVICE_FILES) +extract_filenames(action/*.action MROVER_ACTION_FILES) + set(MROVER_MESSAGE_DEPENDENCIES std_msgs sensor_msgs + actionlib_msgs ) set(MROVER_PARAMETERS @@ -123,6 +127,8 @@ add_message_files(FILES ${MROVER_MESSAGE_FILES}) add_service_files(FILES ${MROVER_SERVICE_FILES}) +add_action_files(DIRECTORY action FILES ${MROVER_ACTION_FILES}) + generate_messages(DEPENDENCIES ${MROVER_MESSAGE_DEPENDENCIES}) generate_dynamic_reconfigure_options(${MROVER_PARAMETERS}) diff --git a/action/ArmAction.action b/action/ArmAction.action new file mode 100644 index 000000000..9cc4f5321 --- /dev/null +++ b/action/ArmAction.action @@ -0,0 +1,3 @@ +string name +--- +--- diff --git a/config/teleop.yaml b/config/teleop.yaml index 5aafb02ee..03676355d 100644 --- a/config/teleop.yaml +++ b/config/teleop.yaml @@ -74,14 +74,23 @@ teleop: left_js_y: 1 right_js_x: 2 right_js_y: 3 - left_trigger: 6 - right_trigger: 7 a: 0 b: 1 x: 2 y: 3 left_bumper: 4 right_bumper: 5 + left_trigger: 6 + right_trigger: 7 + back: 8 + forward: 9 + left_stick_click: 10 + right_stick_click: 11 + dpad_up: 12 + dpad_down: 13 + dpad_left: 14 + dpad_right: 15 + home: 16 joystick_mappings: left_right: 0 diff --git a/setup.py b/setup.py index 48ba751d1..e5807a321 100644 --- a/setup.py +++ b/setup.py @@ -13,6 +13,7 @@ "navigation.failure_identification", "esw", "teleoperation.teleoperation", + "teleoperation.arm_controller", ], package_dir={"": "src"}, ) diff --git a/src/teleoperation/arm_automation/__init__.py b/src/teleoperation/arm_automation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/teleoperation/arm_automation/arm_automation.py b/src/teleoperation/arm_automation/arm_automation.py new file mode 100755 index 000000000..0bd6afae6 --- /dev/null +++ b/src/teleoperation/arm_automation/arm_automation.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 + +import rospy + +import actionlib + +from mrover.msg 
import ArmActionAction, ArmActionGoal, ArmActionResult, Position + +from sensor_msgs.msg import JointState + + +def arm_automation() -> None: + rospy.init_node("arm_automation") + + server = None + joint_state = None + + pos_pub = rospy.Publisher("arm_position_cmd", Position, queue_size=1) + + def joint_state_callback(msg: JointState): + nonlocal joint_state + joint_state = msg + + rospy.Subscriber("joint_states", JointState, joint_state_callback) + + def execute_callback(goal: ArmActionGoal): + success = True + + rate = rospy.Rate(20) + while not rospy.is_shutdown(): + if server.is_preempt_requested(): + server.set_preempted() + success = False + break + + if goal.name == "de_home": + pos_pub.publish(Position(names=["joint_de_pitch", "joint_de_roll"], position=[0, 0])) + if joint_state is None: + success = False + break + + if abs(joint_state.position[0]) < 0.1 and abs(joint_state.position[1]) < 0.1: + break + + rate.sleep() + + if success: + server.set_succeeded(ArmActionResult()) + else: + server.set_aborted(ArmActionResult()) + + server = actionlib.SimpleActionServer("arm_action", ArmActionAction, execute_cb=execute_callback, auto_start=False) + server.start() + + +if __name__ == "__main__": + try: + arm_automation() + except rospy.ROSInterruptException: + pass diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index fef797f8c..fdc58227f 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -13,8 +13,12 @@ from backend.models import AutonWaypoint, BasicWaypoint from geometry_msgs.msg import Twist, Pose, Point, Quaternion, Vector3, PoseStamped +import actionlib + # from cv_bridge import CvBridge from mrover.msg import ( + ArmActionAction, + ArmActionGoal, PDLB, ControllerState, GPSWaypoint, @@ -381,17 +385,26 @@ def handle_controls_message(self, msg): elif msg["type"] == "cache_values": names = ["cache"] - if msg["arm_mode"] == "ik": - self.publish_ik(msg["axes"], 
msg["buttons"]) + if msg["buttons"][self.xbox_mappings["home"]] > 0.5: + client = actionlib.SimpleActionClient("arm_action", ArmActionAction) + client.wait_for_server() + + goal = ArmActionGoal(name="de_home") + client.send_goal(goal) + + client.wait_for_result() + else: + if msg["arm_mode"] == "ik": + self.publish_ik(msg["axes"], msg["buttons"]) - elif msg["arm_mode"] == "position": - self.publish_position(type=msg["type"], names=names, positions=msg["positions"]) + elif msg["arm_mode"] == "position": + self.publish_position(type=msg["type"], names=names, positions=msg["positions"]) - elif msg["arm_mode"] == "velocity": - self.publish_velocity(type=msg["type"], names=names, axes=msg["axes"], buttons=msg["buttons"]) + elif msg["arm_mode"] == "velocity": + self.publish_velocity(type=msg["type"], names=names, axes=msg["axes"], buttons=msg["buttons"]) - elif msg["arm_mode"] == "throttle": - self.publish_throttle(type=msg["type"], names=names, axes=msg["axes"], buttons=msg["buttons"]) + elif msg["arm_mode"] == "throttle": + self.publish_throttle(type=msg["type"], names=names, axes=msg["axes"], buttons=msg["buttons"]) def handle_joystick_message(self, msg): # Tiny deadzone so we can safely e-stop with dampen switch From 671f07156e44fbad1dc923bb7523a2e9947ee6d8 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 20 Apr 2024 17:01:30 -0400 Subject: [PATCH 097/126] Temp fix --- CMakeLists.txt | 2 +- src/preload/bits/allocator.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b363cfdf1..930a5dbbc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -102,7 +102,7 @@ if (ZED_FOUND) set(CMAKE_CUDA_STANDARD 17) set(CMAKE_CUDA_STANDARD_REQUIRED ON) set(CMAKE_CUDA_SEPARABLE_COMPILATION ON) - set(CMAKE_CUDA_FLAGS "--diag-suppress 108,68") + set(CMAKE_CUDA_FLAGS "--diag-suppress 20012") # Jetson Xavier NX/AGX has Volta 7.2 architecture # Perception Laptop (A4000, Quadro version of RTX 3080) has Ampere 8.6 architecture 
set(CMAKE_CUDA_ARCHITECTURES 72 86) diff --git a/src/preload/bits/allocator.h b/src/preload/bits/allocator.h index 5d5595bbd..c33fb23b3 100644 --- a/src/preload/bits/allocator.h +++ b/src/preload/bits/allocator.h @@ -6,7 +6,7 @@ // C++ should have never broke this backwards compatibility! // How silly! -#if defined(__linux__) && defined(__GNUC__) && (__GNUC__ == 13) +#if defined(__linux__) && defined(__GLIBCXX__) && !defined(__CUDACC__) // Allocators -*- C++ -*- From 189b267db7c889df242d2ed786a894b030036008 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 20 Apr 2024 17:29:43 -0400 Subject: [PATCH 098/126] Update --- .../arm_automation/arm_automation.py | 52 +++++++++++++------ 1 file changed, 36 insertions(+), 16 deletions(-) diff --git a/src/teleoperation/arm_automation/arm_automation.py b/src/teleoperation/arm_automation/arm_automation.py index 0bd6afae6..f64533273 100755 --- a/src/teleoperation/arm_automation/arm_automation.py +++ b/src/teleoperation/arm_automation/arm_automation.py @@ -8,6 +8,8 @@ from sensor_msgs.msg import JointState +import numpy as np + def arm_automation() -> None: rospy.init_node("arm_automation") @@ -21,33 +23,51 @@ def joint_state_callback(msg: JointState): nonlocal joint_state joint_state = msg - rospy.Subscriber("joint_states", JointState, joint_state_callback) + rospy.Subscriber("arm_joint_data", JointState, joint_state_callback) def execute_callback(goal: ArmActionGoal): - success = True - + if joint_state is None: + rospy.logerr("No joint state data available") + server.set_aborted(ArmActionResult()) + return + + if goal.name == "de_home": + target_names = ["joint_de_pitch", "joint_de_roll"] + target_positions = [ + joint_state.position[joint_state.name.index('joint_de_pitch')], + np.pi / 8 + ] + rospy.loginfo(f"Moving to {target_positions} for {target_names}") + else: + rospy.logerr("Invalid goal name") + server.set_aborted(ArmActionResult()) + return + rate = rospy.Rate(20) while not rospy.is_shutdown(): if 
server.is_preempt_requested(): server.set_preempted() - success = False - break + rospy.loginfo("Preempted") + server.set_aborted(ArmActionResult()) + return - if goal.name == "de_home": - pos_pub.publish(Position(names=["joint_de_pitch", "joint_de_roll"], position=[0, 0])) - if joint_state is None: - success = False - break - if abs(joint_state.position[0]) < 0.1 and abs(joint_state.position[1]) < 0.1: - break + pos_pub.publish(Position(names=target_names, positions=target_positions)) + + feedback = [ + joint_state.position[joint_state.name.index('joint_de_pitch')], + joint_state.position[joint_state.name.index('joint_de_roll')] + ] + + if np.allclose(target_positions, feedback, atol=0.1): + rospy.loginfo("Reached target") + break rate.sleep() - if success: - server.set_succeeded(ArmActionResult()) - else: - server.set_aborted(ArmActionResult()) + server.set_succeeded(ArmActionResult()) + + rospy.sleep(1) server = actionlib.SimpleActionServer("arm_action", ArmActionAction, execute_cb=execute_callback, auto_start=False) server.start() From e4e59a13ff725a9d3b987b80a569b00cbf162ea4 Mon Sep 17 00:00:00 2001 From: umroverPerception Date: Sat, 20 Apr 2024 18:55:37 -0400 Subject: [PATCH 099/126] Updates --- config/esw.yaml | 6 +++--- launch/esw_base.launch | 24 ++++++++++++------------ 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/config/esw.yaml b/config/esw.yaml index 3c877b92a..c68689546 100644 --- a/config/esw.yaml +++ b/config/esw.yaml @@ -144,11 +144,11 @@ brushless_motors: max_backward_pos: 0.0 max_torque: 20.0 joint_c: - min_velocity: -0.04 # in terms of output - max_velocity: 0.04 # max output shaft speed: 5 rpm (for now) + min_velocity: -0.03 # in terms of output + max_velocity: 0.03 # max output shaft speed: 5 rpm (for now) min_position: -0.125 max_position: 0.30 # 220 degrees of motion is the entire range - max_torque: 20.0 + max_torque: 200.0 joint_de_0: min_velocity: -5.0 max_velocity: 5.0 diff --git a/launch/esw_base.launch 
b/launch/esw_base.launch index e2ccf7d2d..e69ff7fa2 100644 --- a/launch/esw_base.launch +++ b/launch/esw_base.launch @@ -23,14 +23,6 @@ --> - - @@ -47,15 +39,23 @@ -
From 51c2504f68c2ec88a47d1a89524e54cc4f02d0b9 Mon Sep 17 00:00:00 2001 From: neuenfeldttj Date: Mon, 22 Apr 2024 12:06:07 -0400 Subject: [PATCH 106/126] added cameras tab, cleaned up code --- .../frontend/src/components/AutonTask.vue | 2 +- .../frontend/src/components/CameraFeed.vue | 23 ------------------ .../frontend/src/components/Cameras.vue | 24 ++----------------- .../frontend/src/components/DMESTask.vue | 2 +- .../frontend/src/components/ISHTask.vue | 2 +- .../frontend/src/components/Menu.vue | 3 +-- .../frontend/src/components/MenuButton.vue | 9 ++++++- .../frontend/src/components/SATask.vue | 2 +- .../frontend/src/router/index.ts | 11 +++++++++ 9 files changed, 26 insertions(+), 52 deletions(-) diff --git a/src/teleoperation/frontend/src/components/AutonTask.vue b/src/teleoperation/frontend/src/components/AutonTask.vue index 1cd400907..c56858767 100644 --- a/src/teleoperation/frontend/src/components/AutonTask.vue +++ b/src/teleoperation/frontend/src/components/AutonTask.vue @@ -47,7 +47,7 @@
- +
diff --git a/src/teleoperation/frontend/src/components/CameraFeed.vue b/src/teleoperation/frontend/src/components/CameraFeed.vue index efbce30af..2620bd880 100644 --- a/src/teleoperation/frontend/src/components/CameraFeed.vue +++ b/src/teleoperation/frontend/src/components/CameraFeed.vue @@ -3,19 +3,6 @@

{{ name }} • ID: {{ id }}

-
- - -
@@ -52,8 +39,6 @@ export default defineComponent({ // IK Mode IKCam: false, - - quality: 2 } }, @@ -82,14 +67,6 @@ export default defineComponent({ this.sendMessage({ type: 'start_click_ik', data: { x: event.offsetX, y: event.offsetY } }) } }, - changeQuality: function () { - this.sendMessage({ - type: 'sendCameras', - primary: this.mission === 'sa', - device: this.id, - resolution: parseInt(this.quality) - }) - }, toggleIKMode: function () { this.IKCam = !this.IKCam diff --git a/src/teleoperation/frontend/src/components/Cameras.vue b/src/teleoperation/frontend/src/components/Cameras.vue index 786f7b60b..55f8be2cd 100644 --- a/src/teleoperation/frontend/src/components/Cameras.vue +++ b/src/teleoperation/frontend/src/components/Cameras.vue @@ -1,7 +1,6 @@ @@ -62,10 +61,6 @@ export default { }, props: { - primary: { - type: Boolean, - required: true - }, isSA: { type: Boolean, required: true @@ -82,10 +77,7 @@ export default { cameraIdx: 0, cameraName: '', capacity: 4, - qualities: reactive(new Array(9).fill(-1)), streamOrder: reactive([]), - - num_available: -1 } }, @@ -123,19 +115,9 @@ export default { setCamIndex: function (index: number) { // every time a button is pressed, it changes cam status and adds/removes from stream this.camsEnabled[index] = !this.camsEnabled[index] - if (this.camsEnabled[index]) this.qualities[index] = 2 //if enabling camera, turn on medium quality this.changeStream(index) }, - sendCameras: function (index: number) { - this.sendMessage({ - type: 'sendCameras', - primary: this.primary, - device: index, - resolution: this.qualities[index] - }) - }, - addCameraName: function () { this.names[this.cameraIdx] = this.cameraName }, @@ -145,9 +127,7 @@ export default { if (found) { this.streamOrder.splice(this.streamOrder.indexOf(index), 1) this.streamOrder.push(-1) - this.qualities[index] = -1 //close the stream when sending it to comms } else this.streamOrder[this.streamOrder.indexOf(-1)] = index - this.sendCameras(index) }, takePanorama() { 
diff --git a/src/teleoperation/frontend/src/components/DMESTask.vue b/src/teleoperation/frontend/src/components/DMESTask.vue index b83e5ff9b..668223102 100644 --- a/src/teleoperation/frontend/src/components/DMESTask.vue +++ b/src/teleoperation/frontend/src/components/DMESTask.vue @@ -22,7 +22,7 @@
- +
diff --git a/src/teleoperation/frontend/src/components/ISHTask.vue b/src/teleoperation/frontend/src/components/ISHTask.vue index 8be0a6a1c..08e46c76a 100644 --- a/src/teleoperation/frontend/src/components/ISHTask.vue +++ b/src/teleoperation/frontend/src/components/ISHTask.vue @@ -25,7 +25,7 @@
- +
diff --git a/src/teleoperation/frontend/src/components/Menu.vue b/src/teleoperation/frontend/src/components/Menu.vue index cfea3f72e..cea3a9804 100644 --- a/src/teleoperation/frontend/src/components/Menu.vue +++ b/src/teleoperation/frontend/src/components/Menu.vue @@ -13,8 +13,7 @@ - - +
diff --git a/src/teleoperation/frontend/src/components/MenuButton.vue b/src/teleoperation/frontend/src/components/MenuButton.vue index 6218a81f8..88998202e 100644 --- a/src/teleoperation/frontend/src/components/MenuButton.vue +++ b/src/teleoperation/frontend/src/components/MenuButton.vue @@ -1,5 +1,5 @@ diff --git a/src/teleoperation/frontend/src/components/SATask.vue b/src/teleoperation/frontend/src/components/SATask.vue index 7b0bc13d0..e9776bdba 100644 --- a/src/teleoperation/frontend/src/components/SATask.vue +++ b/src/teleoperation/frontend/src/components/SATask.vue @@ -28,7 +28,7 @@
- +
diff --git a/src/teleoperation/frontend/src/router/index.ts b/src/teleoperation/frontend/src/router/index.ts index f34e022f1..495e00bf9 100644 --- a/src/teleoperation/frontend/src/router/index.ts +++ b/src/teleoperation/frontend/src/router/index.ts @@ -4,6 +4,7 @@ import DMESTask from '../components/DMESTask.vue' import AutonTask from '../components/AutonTask.vue' import ISHTask from '../components/ISHTask.vue' import SATask from '../components/SATask.vue' +import Cameras from '../components/Cameras.vue' const routes = [ { @@ -41,7 +42,17 @@ const routes = [ path: '/ISHTask', name: 'ISHTask', component: ISHTask + }, + { + path: '/Cameras', + name: 'Cameras', + component: Cameras, + props: { + isSA: false, + mission: 'other' + } } + ] const router = createRouter({ history: createWebHistory(), From 434d2e43ea5adf3d932d14c9b6a7cdd321ad8e5b Mon Sep 17 00:00:00 2001 From: qhdwight Date: Mon, 22 Apr 2024 21:50:24 -0400 Subject: [PATCH 107/126] Add micro adjust for drive --- config/teleop.yaml | 4 +++- src/teleoperation/backend/consumers.py | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/config/teleop.yaml b/config/teleop.yaml index bcdc5940f..1b8c68952 100644 --- a/config/teleop.yaml +++ b/config/teleop.yaml @@ -57,7 +57,9 @@ teleop: enabled: true multiplier: 1 twist: - multiplier: 0.7 + multiplier: 0.6 + tilt: + multiplier: 1 xbox_mappings: left_x: 0 diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index 0580d6a47..75db25f50 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -433,6 +433,8 @@ def get_axes_input( # angular_from_lateral = get_axes_input("left_right", 0.4, True) angular = get_axes_input("twist", 0.03, True, self.max_angular_speed * dampen) + linear += get_axes_input("tilt", 0.5, scale=0.1) + self.twist_pub.publish( Twist( linear=Vector3(x=linear), @@ -812,9 +814,7 @@ def flight_attitude_listener(self): def arm_joint_callback(self, msg: 
JointState) -> None: # Set non-finite values to zero - for i in range(len(msg.position)): - if not isfinite(msg.position[i]): - msg.position[i] = 0 + msg.position = [x if isfinite(x) else 0 for x in msg.position] self.send(text_data=json.dumps({"type": "fk", "positions": msg.position})) def science_spectral_callback(self, msg): From 251462816b478707c9e3660e13d2fe897049d4de Mon Sep 17 00:00:00 2001 From: qhdwight Date: Tue, 23 Apr 2024 00:02:32 -0400 Subject: [PATCH 108/126] Supress cuda warning --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c68974f1..4e916101b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -98,7 +98,7 @@ if (ZED_FOUND) set(CMAKE_CUDA_STANDARD 17) set(CMAKE_CUDA_STANDARD_REQUIRED ON) set(CMAKE_CUDA_SEPARABLE_COMPILATION ON) - set(CMAKE_CUDA_FLAGS "--diag-suppress 108,68") + set(CMAKE_CUDA_FLAGS "--diag-suppress 20012") # Jetson Xavier NX/AGX has Volta 7.2 architecture # Perception Laptop (A4000, Quadro version of RTX 3080) has Ampere 8.6 architecture set(CMAKE_CUDA_ARCHITECTURES 72 86) From f2147db8225576522a1a29c3fb6ab23f7b4370fe Mon Sep 17 00:00:00 2001 From: jnnanni Date: Tue, 23 Apr 2024 19:28:52 -0400 Subject: [PATCH 109/126] SA_z encoder in inches --- src/teleoperation/frontend/src/components/SAArmControls.vue | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/teleoperation/frontend/src/components/SAArmControls.vue b/src/teleoperation/frontend/src/components/SAArmControls.vue index 3aaed73e4..5047bd2db 100644 --- a/src/teleoperation/frontend/src/components/SAArmControls.vue +++ b/src/teleoperation/frontend/src/components/SAArmControls.vue @@ -46,7 +46,7 @@
-

SA Z Position: {{ z_position }}

+

SA Z Position: {{ z_position }} inches

- + From 4c766ade3c842f9b726d9dea784bb871723660d1 Mon Sep 17 00:00:00 2001 From: nehakankanala Date: Wed, 24 Apr 2024 18:16:58 -0400 Subject: [PATCH 111/126] added gps driver back in --- src/localization/gps_driver.py | 137 +++++++++++++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 src/localization/gps_driver.py diff --git a/src/localization/gps_driver.py b/src/localization/gps_driver.py new file mode 100644 index 000000000..0ed6ea0c3 --- /dev/null +++ b/src/localization/gps_driver.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 +""" +Reads and parses NMEA messages from the onboard GPS to provide +location data to the rover over LCM (/gps). Subscribes to +/rtcm and passes RTCM messages to the onboard gps to +acquire an RTK fix. +""" +import serial +import asyncio +import rospy +import threading +import numpy as np +from os import getenv + +import rospy + +# from rover_msgs import GPS, RTCM +from pyubx2 import UBXReader, UBX_PROTOCOL, RTCM3_PROTOCOL, protocol, UBXMessage +from std_msgs.msg import Header +from sensor_msgs.msg import NavSatFix + +# from rtcm_msgs.msg import Message +# from mrover.msg import rtkStatus +import datetime + + +class GPS_Driver: + def __init__(self): + rospy.init_node("gps_driver") + self.port = "/dev/gps" + self.baud = 115200 + # self.base_station_sub = rospy.Subscriber("/rtcm", Message, self.process_rtcm) + self.gps_pub = rospy.Publisher("fix", NavSatFix, queue_size=1) + # self.rtk_fix_pub = rospy.Publisher("rtk_fix_status", rtkStatus, queue_size=1) + + self.lock = threading.Lock() + self.valid_offset = False + self.time_offset = -1 + + def connect(self): + # open connection to rover GPS + self.ser = serial.Serial(self.port, self.baud) + self.reader = UBXReader(self.ser, protfilter=(UBX_PROTOCOL | RTCM3_PROTOCOL)) + + return self + + def exit(self) -> None: + # close connection + self.ser.close() + + # def process_rtcm(self, data) -> None: + # print("processing RTCM") + # with self.lock: + # # rtcm_data = 
RTCM.decode(data) + # self.ser.write(data.message) + + def parse_rover_gps_data(self, rover_gps_data) -> None: + msg = rover_gps_data + # skip if message could not be parsed + if not msg: + return + + if rover_gps_data.identity == "RXM-RTCM": + rospy.loginfo("RXM") + msg_used = msg.msgUsed + + if msg_used == 0: + rospy.logwarn("RTCM Usage unknown\n") + elif msg_used == 1: + rospy.logwarn("RTCM message not used\n") + elif msg_used == 2: + rospy.loginfo("RTCM message successfully used by receiver\n") + + # rospy.loginfo(vars(rover_gps_data)) + + if rover_gps_data.identity == "NAV-PVT": + rospy.loginfo("PVT") + parsed_latitude = msg.lat + parsed_longitude = msg.lon + parsed_altitude = msg.hMSL + time = datetime.datetime(year=msg.year, month=msg.month, day=msg.day, hour=msg.hour, second=msg.second) + time = rospy.Time(secs=time.timestamp() + (msg.nano / 1e6)) + if not self.valid_offset: + self.time_offset = rospy.Time.now() - time + self.valid_offset = True + + time = time + self.time_offset + + rospy.loginfo_throttle(3, f"{time} {rospy.Time.now()} {time-rospy.Time.now()} {self.time_offset}") + + message_header = Header(stamp=time, frame_id="base_link") + + self.gps_pub.publish( + NavSatFix( + header=message_header, + latitude=parsed_latitude, + longitude=parsed_longitude, + altitude=parsed_altitude, + ) + ) + # self.rtk_fix_pub.publish(rtkStatus(msg_used)) + + if msg.difSoln == 1: + rospy.loginfo_throttle(3, "Differential correction applied") + + # publidh to navsatstatus in navsatfix + if msg.carrSoln == 0: + rospy.logwarn_throttle(3, "No RTK") + elif msg.carrSoln == 1: + rospy.loginfo_throttle(3, "Floating RTK Fix") + elif msg.carrSoln == 2: + rospy.loginfo_throttle(3, "RTK FIX") + + if rover_gps_data.identity == "NAV-STATUS": + pass + + def gps_data_thread(self) -> None: + # TODO: add more message checks if needed + while not rospy.is_shutdown(): + with self.lock: + if self.ser.in_waiting: + raw, rover_gps_data = self.reader.read() + parsed_gps_data = 
self.parse_rover_gps_data(rover_gps_data) + + +def main(): + # change values + rtk_manager = GPS_Driver() + rtk_manager.connect() + # rover_gps_thread = threading.Thread(rtk_manager.gps_data_thread) + # rover_gps_thread.start() + rtk_manager.gps_data_thread() + rtk_manager.exit() + + +if __name__ == "__main__": + main() \ No newline at end of file From b6d181bfb93cb1e660b4583b69ff16d130646dbc Mon Sep 17 00:00:00 2001 From: neuenfeldttj Date: Wed, 24 Apr 2024 18:33:47 -0400 Subject: [PATCH 112/126] added map stuff --- .../frontend/src/components/DMESTask.vue | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/teleoperation/frontend/src/components/DMESTask.vue b/src/teleoperation/frontend/src/components/DMESTask.vue index c0dae73e4..688f7c916 100644 --- a/src/teleoperation/frontend/src/components/DMESTask.vue +++ b/src/teleoperation/frontend/src/components/DMESTask.vue @@ -70,6 +70,7 @@ import OdometryReading from './OdometryReading.vue' import DriveControls from './DriveControls.vue' import MastGimbalControls from './MastGimbalControls.vue' import type ArmMoteusStateTableVue from './ArmMoteusStateTable.vue' +import { quaternionToMapAngle } from '../utils.js' export default defineComponent({ components: { @@ -140,7 +141,16 @@ export default defineComponent({ this.moteusDrive.state = msg.state this.moteusDrive.error = msg.error this.moteusDrive.limit_hit = msg.limit_hit - } else if (msg.type == 'center_map') { + } + else if (msg.type == 'nav_sat_fix') { + this.odom.latitude_deg = msg.latitude + this.odom.longitude_deg = msg.longitude + this.odom.altitude = msg.altitude + } + else if (msg.type == 'auton_tfclient') { + this.odom.bearing_deg = quaternionToMapAngle(msg.rotation) + } + else if (msg.type == 'center_map') { this.odom.latitude_deg = msg.latitude this.odom.longitude_deg = msg.longitude } From 0710cfae809aeabeabafe032e2a407dbfd760ce1 Mon Sep 17 00:00:00 2001 From: neuenfeldttj Date: Wed, 24 Apr 2024 18:51:33 -0400 Subject: [PATCH 
113/126] fixed bearing --- src/teleoperation/backend/consumers.py | 8 +++---- .../frontend/src/components/AutonTask.vue | 4 ++-- .../frontend/src/components/DMESTask.vue | 8 +++++-- .../frontend/src/components/SATask.vue | 22 +++++++++++++++++-- 4 files changed, 32 insertions(+), 10 deletions(-) diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index 75db25f50..f44425176 100644 --- a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -187,8 +187,8 @@ def receive(self, text_data): self.send_auton_command(message) elif message["type"] == "teleop_enabled": self.send_teleop_enabled(message) - elif message["type"] == "auton_tfclient": - self.auton_bearing() + elif message["type"] == "bearing": + self.bearing() elif message["type"] == "mast_gimbal": self.mast_gimbal(message) elif message["type"] == "max_streams": @@ -655,12 +655,12 @@ def ish_thermistor_data_callback(self, msg): temps = [x.temperature for x in msg.temps] self.send(text_data=json.dumps({"type": "thermistor", "temps": temps})) - def auton_bearing(self): + def bearing(self): base_link_in_map = SE3.from_tf_tree(self.tf_buffer, "map", "base_link") self.send( text_data=json.dumps( { - "type": "auton_tfclient", + "type": "bearing", "rotation": base_link_in_map.rotation.quaternion.tolist(), } ) diff --git a/src/teleoperation/frontend/src/components/AutonTask.vue b/src/teleoperation/frontend/src/components/AutonTask.vue index 1cd400907..bf06605f8 100644 --- a/src/teleoperation/frontend/src/components/AutonTask.vue +++ b/src/teleoperation/frontend/src/components/AutonTask.vue @@ -156,7 +156,7 @@ export default defineComponent({ this.odom.latitude_deg = msg.latitude this.odom.longitude_deg = msg.longitude this.odom.altitude = msg.altitude - } else if (msg.type == 'auton_tfclient') { + } else if (msg.type == 'bearing') { this.odom.bearing_deg = quaternionToMapAngle(msg.rotation) } else if (msg.type == "center_map") { 
this.odom.latitude_deg = msg.latitude @@ -179,7 +179,7 @@ export default defineComponent({ this.sendMessage({ "type": "center_map" }); }, 250) interval = setInterval(() => { - this.sendMessage({ type: 'auton_tfclient' }) + this.sendMessage({ type: 'bearing' }) }, 1000) }, diff --git a/src/teleoperation/frontend/src/components/DMESTask.vue b/src/teleoperation/frontend/src/components/DMESTask.vue index 688f7c916..f33cd09aa 100644 --- a/src/teleoperation/frontend/src/components/DMESTask.vue +++ b/src/teleoperation/frontend/src/components/DMESTask.vue @@ -69,9 +69,10 @@ import MotorsStatusTable from './MotorsStatusTable.vue' import OdometryReading from './OdometryReading.vue' import DriveControls from './DriveControls.vue' import MastGimbalControls from './MastGimbalControls.vue' -import type ArmMoteusStateTableVue from './ArmMoteusStateTable.vue' import { quaternionToMapAngle } from '../utils.js' +let interval: number + export default defineComponent({ components: { PDBFuse, @@ -147,7 +148,7 @@ export default defineComponent({ this.odom.longitude_deg = msg.longitude this.odom.altitude = msg.altitude } - else if (msg.type == 'auton_tfclient') { + else if (msg.type == 'bearing') { this.odom.bearing_deg = quaternionToMapAngle(msg.rotation) } else if (msg.type == 'center_map') { @@ -173,6 +174,9 @@ export default defineComponent({ this.cancelIK(event) } }) + interval = setInterval(() => { + this.sendMessage({ type: 'bearing' }) + }, 1000) } }) diff --git a/src/teleoperation/frontend/src/components/SATask.vue b/src/teleoperation/frontend/src/components/SATask.vue index 7b0bc13d0..4382b7c56 100644 --- a/src/teleoperation/frontend/src/components/SATask.vue +++ b/src/teleoperation/frontend/src/components/SATask.vue @@ -106,7 +106,9 @@ import MotorAdjust from './MotorAdjust.vue' import OdometryReading from './OdometryReading.vue' import SAArmControls from './SAArmControls.vue' import { disableAutonLED, quaternionToMapAngle } from '../utils.js' -import { mapState } from 'vuex' 
+import { mapState, mapActions } from 'vuex' + +let interval: number export default { components: { @@ -175,10 +177,26 @@ export default { this.moteusState.error = msg.error this.moteusState.limit_hit = msg.limit_hit } + else if (msg.type == 'nav_sat_fix') { + this.odom.latitude_deg = msg.latitude + this.odom.longitude_deg = msg.longitude + this.odom.altitude = msg.altitude + } + else if (msg.type == 'bearing') { + this.odom.bearing_deg = quaternionToMapAngle(msg.rotation) + } } }, - created: function () {} + methods: { + ...mapActions('websocket', ['sendMessage']), + }, + + created: function () { + interval = setInterval(() => { + this.sendMessage({ type: 'bearing' }) + }, 1000) + } } From 98975cff4404fd996ec355561768aa1ce16412de Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 27 Apr 2024 13:29:41 -0400 Subject: [PATCH 114/126] Refactor streaming --- ansible/roles/build/tasks/main.yml | 1 + .../gst_websocket_streamer.cpp | 125 +++++++++--------- .../gst_websocket_streamer.hpp | 15 +++ .../frontend/src/components/CameraFeed.vue | 77 ++++++----- 4 files changed, 127 insertions(+), 91 deletions(-) diff --git a/ansible/roles/build/tasks/main.yml b/ansible/roles/build/tasks/main.yml index 6d91a9651..c2c6e022f 100644 --- a/ansible/roles/build/tasks/main.yml +++ b/ansible/roles/build/tasks/main.yml @@ -97,6 +97,7 @@ - libgstreamer1.0-dev - libgstreamer-plugins-base1.0-dev - vainfo + - x264 - name: Install Local APT Packages become: True diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp b/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp index a1430549b..dd7af2f46 100644 --- a/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp +++ b/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp @@ -1,11 +1,5 @@ #include "gst_websocket_streamer.hpp" -#if defined(MROVER_IS_JETSON) -constexpr bool IS_JETSON = true; -#else -constexpr bool IS_JETSON = false; -#endif - namespace mrover { template @@ -26,7 +20,12 @@ namespace mrover { GstBuffer* 
buffer = gst_sample_get_buffer(sample); GstMapInfo map; gst_buffer_map(buffer, &map, GST_MAP_READ); - mStreamServer->broadcast({reinterpret_cast(map.data), map.size}); + + std::vector chunk(sizeof(ChunkHeader) + map.size); + std::memcpy(chunk.data(), &mChunkHeader, sizeof(ChunkHeader)); + std::memcpy(chunk.data() + sizeof(ChunkHeader), map.data, map.size); + + mStreamServer->broadcast(chunk); gst_buffer_unmap(buffer, &map); gst_sample_unref(sample); @@ -38,65 +37,63 @@ namespace mrover { mMainLoop = gstCheck(g_main_loop_new(nullptr, FALSE)); + // Source std::string launch; if (mCaptureDevice.empty()) { - if constexpr (IS_JETSON) { - // ReSharper disable once CppDFAUnreachableCode - launch = std::format( - // App source is pushed to when we get a ROS BGRA image message - // is-live prevents frames from being pushed when the pipeline is in READY - "appsrc name=imageSource is-live=true " - "! video/x-raw,format=BGRA,width={},height={},framerate=30/1 " - "! videoconvert " // Convert BGRA => I420 (YUV) for the encoder, note we are still on the CPU - "! video/x-raw,format=I420 " - "! nvvidconv " // Upload to GPU memory for the encoder - "! video/x-raw(memory:NVMM),format=I420 " - "! nvv4l2h265enc name=encoder bitrate={} iframeinterval=300 vbv-size=33333 insert-sps-pps=true control-rate=constant_bitrate profile=Main num-B-Frames=0 ratecontrol-enable=true preset-level=UltraFastPreset EnableTwopassCBR=false maxperf-enable=true " - // App sink is pulled from (getting H265 chunks) on another thread and sent to the stream server - // sync=false is needed to avoid weirdness, going from playing => ready => playing will not work otherwise - "! appsink name=streamSink sync=false", - mImageWidth, mImageHeight, mBitrate); - } else { - // ReSharper disable once CppDFAUnreachableCode - launch = std::format( - "appsrc name=imageSource is-live=true " - "! video/x-raw,format=BGRA,width={},height={},framerate=30/1 " - "! nvh265enc name=encoder " - "! 
appsink name=streamSink sync=false", - mImageWidth, mImageHeight); - } + // App source is pushed to when we get a ROS BGRA image message + // is-live prevents frames from being pushed when the pipeline is in READY + launch += "appsrc name=imageSource is-live=true "; } else { - if constexpr (IS_JETSON) { - // ReSharper disable once CppDFAUnreachableCode - - // TODO(quintin): I had to apply this patch: https://forums.developer.nvidia.com/t/macrosilicon-usb/157777/4 - // nvv4l2camerasrc only supports UYUV by default, but our cameras are YUY2 (YUYV) - - launch = std::format( + launch += std::format("v4l2src device={} ", mCaptureDevice); + } + // Source format + std::string captureFormat = mCaptureDevice.empty() ? "video/x-raw,format=BGRA" : mDecodeJpegFromDevice ? "image/jpeg" + : "video/x-raw,format=YUY2"; + launch += std::format("! {},width={},height={},framerate={}/1 ", + captureFormat, mImageWidth, mImageHeight, mImageFramerate); + // Source decoder and H265 encoder + if (gst_element_factory_find("nvv4l2h265enc")) { + // Most likely on the Jetson + if (captureFormat.contains("jpeg")) { + // Mostly used with USB cameras, MPEG capture uses way less USB bandwidth + launch += + // TODO(quintin): I had to apply this patch: https://forums.developer.nvidia.com/t/macrosilicon-usb/157777/4 + // nvv4l2camerasrc only supports UYUV by default, but our cameras are YUY2 (YUYV) // "nvv4l2camerasrc device={} " - - "v4l2src device={} " - // "! video/x-raw(memory:NVMM),format=YUY2,width={},height={},framerate={}/1 " - "! image/jpeg,width={},height={},framerate={}/1 " - "! nvv4l2decoder mjpeg=1 " - - "! nvvidconv " - "! video/x-raw(memory:NVMM),format=NV12 " - "! nvv4l2h265enc name=encoder bitrate={} iframeinterval=300 vbv-size=33333 insert-sps-pps=true control-rate=constant_bitrate profile=Main num-B-Frames=0 ratecontrol-enable=true preset-level=UltraFastPreset EnableTwopassCBR=false maxperf-enable=true " - "! 
appsink name=streamSink sync=false", - mCaptureDevice, mImageWidth, mImageHeight, mImageFramerate, mBitrate); + + "! nvv4l2decoder mjpeg=1 " // Hardware-accelerated JPEG decoding, output is apparently some unknown proprietary format + "! nvvidconv " // Convert from proprietary format to NV12 so the encoder understands it + "! video/x-raw(memory:NVMM),format=NV12 "; } else { - // ReSharper disable once CppDFAUnreachableCode - launch = std::format( - "v4l2src device={} " - "! video/x-raw,format=YUY2,width={},height={},framerate=30/1 " - "! videoconvert " - "! nvh265enc name=encoder " - "! appsink name=streamSink sync=false", - mCaptureDevice, mImageWidth, mImageHeight); + launch += "! videoconvert " // Convert to I420 for the encoder, note we are still on the CPU + "! video/x-raw,format=I420 " + "! nvvidconv " // Upload to GPU memory for the encoder + "! video/x-raw(memory:NVMM),format=I420 "; } + launch += std::format("! nvv4l2h265enc name=encoder bitrate={} iframeinterval=300 vbv-size=33333 insert-sps-pps=true control-rate=constant_bitrate profile=Main num-B-Frames=0 ratecontrol-enable=true preset-level=UltraFastPreset EnableTwopassCBR=false maxperf-enable=true ", + mBitrate); + mChunkHeader.codec = ChunkHeader::Codec::H265; + } else if (gst_element_factory_find("nvh265enc")) { + // For desktop/laptops with the custom NVIDIA bad gstreamer plugins built (a massive pain to do!) + if (captureFormat.contains("jpeg")) + launch += "! jpegdec "; + else if (captureFormat.contains("YUY2")) + launch += "! videoconvert "; + launch += "! nvh265enc name=encoder "; + mChunkHeader.codec = ChunkHeader::Codec::H265; + } else { + // For desktop/laptops with no hardware encoder + if (captureFormat.contains("jpeg")) + launch += "! jpegdec "; + else + launch += "! videoconvert "; + launch += "! 
x264enc name=encoder "; + mChunkHeader.codec = ChunkHeader::Codec::H264; } + // App sink is pulled from (getting H265 chunks) on another thread and sent to the stream server + // sync=false is needed to avoid weirdness, going from playing => ready => playing will not work otherwise + launch += "! appsink name=streamSink sync=false"; ROS_INFO_STREAM(std::format("GStreamer launch: {}", launch)); mPipeline = gstCheck(gst_parse_launch(launch.c_str(), nullptr)); @@ -145,12 +142,20 @@ namespace mrover { } } + auto widthAndHeightToResolution(std::uint32_t width, std::uint32_t height) -> ChunkHeader::Resolution { + if (width == 640 && height == 480) return ChunkHeader::Resolution::EGA; + if (width == 1280 && height == 720) return ChunkHeader::Resolution::HD; + if (width == 1920 && height == 1080) return ChunkHeader::Resolution::FHD; + throw std::runtime_error{"Unsupported resolution"}; + } + auto GstWebsocketStreamerNodelet::onInit() -> void { try { mNh = getMTNodeHandle(); mPnh = getMTPrivateNodeHandle(); mCaptureDevice = mPnh.param("device", ""); + mDecodeJpegFromDevice = mPnh.param("decode_jpeg_from_device", true); mImageTopic = mPnh.param("image_topic", "image"); mImageWidth = mPnh.param("width", 640); mImageHeight = mPnh.param("height", 480); @@ -159,6 +164,8 @@ namespace mrover { auto port = mPnh.param("port", 8081); mBitrate = mPnh.param("bitrate", 2000000); + mChunkHeader.resolution = widthAndHeightToResolution(mImageWidth, mImageHeight); + gst_init(nullptr, nullptr); initPipeline(); @@ -198,7 +205,7 @@ namespace mrover { } } catch (std::exception const& e) { - ROS_ERROR_STREAM(std::format("Exception initializing NVIDIA GST H265 Encoder: {}", e.what())); + ROS_ERROR_STREAM(std::format("Exception initializing gstreamer websocket streamer: {}", e.what())); ros::requestShutdown(); } } diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp b/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp index c4950f62c..7682a8a54 100644 --- 
a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp +++ b/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp @@ -4,11 +4,24 @@ namespace mrover { + struct ChunkHeader { + enum struct Resolution : std::uint8_t { + EGA, + HD, + FHD, + } resolution; + enum struct Codec : std::uint8_t { + H265, + H264, + } codec; + }; + class GstWebsocketStreamerNodelet final : public nodelet::Nodelet { ros::NodeHandle mNh, mPnh; std::string mCaptureDevice; + bool mDecodeJpegFromDevice{}; std::string mImageTopic; std::uint64_t mBitrate{}; std::uint32_t mImageWidth{}, mImageHeight{}, mImageFramerate{}; @@ -22,6 +35,8 @@ namespace mrover { std::thread mMainLoopThread; std::thread mStreamSinkThread; + ChunkHeader mChunkHeader{}; + auto onInit() -> void override; auto pullStreamSamplesLoop() -> void; diff --git a/src/teleoperation/frontend/src/components/CameraFeed.vue b/src/teleoperation/frontend/src/components/CameraFeed.vue index efbce30af..698838b96 100644 --- a/src/teleoperation/frontend/src/components/CameraFeed.vue +++ b/src/teleoperation/frontend/src/components/CameraFeed.vue @@ -4,26 +4,26 @@

{{ name }} • ID: {{ id }}

- - - - -
- + > + + +
+
diff --git a/src/teleoperation/gui_chromium.sh b/src/teleoperation/gui_chromium.sh index 3f5e50061..e83d1c054 100755 --- a/src/teleoperation/gui_chromium.sh +++ b/src/teleoperation/gui_chromium.sh @@ -4,4 +4,6 @@ # Chromium currently only supports this when using VA-API hardware acceleration # It uses the WebCodecs API to decode # You can easily test if your setup works with this URL: https://w3c.github.io/webcodecs/samples/video-decode-display/ -chromium --enable-features=VaapiVideoDecodeLinuxGL --app=http://localhost:8080 +readonly FLAGS="--enable-features=VaapiVideoDecodeLinuxGL" +readonly ADDRESS="http://localhost:8080" +chromium ${FLAGS} --app=${ADDRESS}/$1 diff --git a/src/teleoperation/gui_chromium_cameras.sh b/src/teleoperation/gui_chromium_cameras.sh new file mode 100755 index 000000000..ef6bade5b --- /dev/null +++ b/src/teleoperation/gui_chromium_cameras.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +$(dirname $0)/gui_chromium.sh Cameras diff --git a/src/teleoperation/gui_chromium_menu.sh b/src/teleoperation/gui_chromium_menu.sh new file mode 100755 index 000000000..a259fcd1c --- /dev/null +++ b/src/teleoperation/gui_chromium_menu.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +$(dirname $0)/gui_chromium.sh From b476237fa5382bc01fac0e1a98fd013ad2815e48 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 27 Apr 2024 20:51:21 -0400 Subject: [PATCH 124/126] Better error msg --- src/esw/can_driver/can_driver.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/esw/can_driver/can_driver.cpp b/src/esw/can_driver/can_driver.cpp index 7b9aa81c2..6355105f8 100644 --- a/src/esw/can_driver/can_driver.cpp +++ b/src/esw/can_driver/can_driver.cpp @@ -69,7 +69,7 @@ namespace mrover { } } else { NODELET_WARN("No CAN devices specified or config was invalid. 
Did you forget to load the correct ROS parameters?"); - NODELET_WARN("For example before testing the devboard run: \"rosparam load config/esw_devboard.yml\""); + NODELET_WARN("For example before testing the devboard run: \"rosparam load config/esw.yml\""); } mCanNetLink = CanNetLink{mInterface}; @@ -201,7 +201,7 @@ namespace mrover { } catch (boost::system::system_error const& error) { // check if ran out of buffer space if (error.code() == boost::asio::error::no_buffer_space) { - NODELET_WARN_STREAM_THROTTLE(1, "No buffer space available to send CAN message"); + NODELET_WARN_STREAM_THROTTLE(1, "No buffer space available to send CAN message. This usually indicates an electrical problem with the bus. CAN will avoid sending out messages if it can not see other devices."); return; } ros::shutdown(); From f4a9902d968a7c65233920dcf4659415f7e6785a Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 27 Apr 2024 20:56:05 -0400 Subject: [PATCH 125/126] Squashed commit of the following: commit b476237fa5382bc01fac0e1a98fd013ad2815e48 Author: qhdwight Date: Sat Apr 27 20:51:21 2024 -0400 Better error msg commit 0508b1be1f5fd8ee72de034686c2c2b694486efa Author: qhdwight Date: Sat Apr 27 17:01:51 2024 -0400 Oops commit e45f80659980bbe106e4a1be06f1c9d03f1f5958 Merge: 7dcfcc752 620179f10 Author: Quintin Date: Sat Apr 27 14:25:53 2024 -0400 Merge pull request #688 from umrover/camera-id Allow camera streaming to select a device from a physical port via udev commit 620179f104543323a06084765a81bb323514c5f3 Author: qhdwight Date: Sat Apr 27 14:22:25 2024 -0400 FOrmat commit 2cfb2b854190286813894813eed499cf9ca376a1 Author: qhdwight Date: Sat Apr 27 14:21:36 2024 -0400 Update launch file commit c1ae562f089bae32247bcf33529abc114e801c0d Author: qhdwight Date: Sat Apr 27 14:16:53 2024 -0400 Reorder commit 1c203f1d366246ba7c6ee35b4a23715dac1ef939 Merge: c29541595 7dcfcc752 Author: qhdwight Date: Sat Apr 27 14:16:19 2024 -0400 Merge remote-tracking branch 'origin/integration' into camera-id 
commit 7dcfcc7529fcc72579f2d433d8db7f3fd90a8d36 Author: qhdwight Date: Sat Apr 27 14:08:57 2024 -0400 Format commit 35e4a7e5bd1a58288a0b913bf988d07481a35afe Author: qhdwight Date: Sat Apr 27 14:07:06 2024 -0400 Add needed video conv commit 09e3fd6a0c27fe06ade8b9dddd2bae374f345317 Author: qhdwight Date: Sat Apr 27 13:51:49 2024 -0400 Comments commit 659fd47c5c0bf8caaf1ec274abf3f5fd2a30f027 Author: qhdwight Date: Sat Apr 27 13:47:07 2024 -0400 Zero tune latency, add to sim commit 98975cff4404fd996ec355561768aa1ce16412de Author: qhdwight Date: Sat Apr 27 13:29:41 2024 -0400 Refactor streaming commit 0710cfae809aeabeabafe032e2a407dbfd760ce1 Author: neuenfeldttj Date: Wed Apr 24 18:51:33 2024 -0400 fixed bearing commit 0b85e5a9e6f840c0862485c92ed6908dd5342577 Merge: b6d181bfb 4c766ade3 Author: neuenfeldttj Date: Wed Apr 24 18:33:59 2024 -0400 Merge branch 'integration' of github.com:umrover/mrover-ros into integration commit b6d181bfb93cb1e660b4583b69ff16d130646dbc Author: neuenfeldttj Date: Wed Apr 24 18:33:47 2024 -0400 added map stuff commit 4c766ade3c842f9b726d9dea784bb871723660d1 Author: nehakankanala Date: Wed Apr 24 18:16:58 2024 -0400 added gps driver back in commit ba1fd6a1bcb892912b2a95348ff097aa4e2dea5c Author: umroverPerception Date: Wed Apr 24 15:51:58 2024 -0400 make changes for mast gimbal commit b39873370d30a0149585420a4ef3da94e9d00bee Merge: 434d2e43e f2147db82 Author: umroverPerception Date: Wed Apr 24 14:19:22 2024 -0400 Merge remote-tracking branch 'origin/integration' into arm-practice commit f2147db8225576522a1a29c3fb6ab23f7b4370fe Author: jnnanni Date: Tue Apr 23 19:28:52 2024 -0400 SA_z encoder in inches commit 251462816b478707c9e3660e13d2fe897049d4de Author: qhdwight Date: Tue Apr 23 00:02:32 2024 -0400 Supress cuda warning commit 434d2e43ea5adf3d932d14c9b6a7cdd321ad8e5b Author: qhdwight Date: Mon Apr 22 21:50:24 2024 -0400 Add micro adjust for drive commit 270509ec8287004553d8f4ac155b3211007bc7a6 Merge: b93f5746f 546e3550e Author: qhdwight 
Date: Mon Apr 22 12:00:51 2024 -0400 Merge remote-tracking branch 'origin/integration' into arm-practice commit 546e3550e6760c790541ef92a84b8c1f3ca0ea8b Merge: c45919a41 916db0022 Author: neuenfeldttj Date: Sun Apr 21 21:32:53 2024 -0400 Merge branch 'integration' of github.com:umrover/mrover-ros into integration commit c45919a411c614a411597b7f9c8826c381112542 Author: neuenfeldttj Date: Sun Apr 21 21:32:35 2024 -0400 added zero sa_z commit 916db002230c8de1cbdeedfed6b3dd2eac2712e3 Author: umroverPerception Date: Sun Apr 21 15:23:10 2024 -0400 Fix compile commit b93f5746f75b5e2a9f83f65e8a4d5ccc688e80c9 Author: MRover Laptop Date: Sun Apr 21 14:05:42 2024 -0400 Tune pid commit 2b7704ab28c9d76ba4ca6d27bbb3487315558105 Author: qhdwight Date: Sun Apr 21 13:34:51 2024 -0400 Set non finite variables to 0 for fk commit 0beb164973fb4ee62d073fe6a604e7c133512136 Merge: 10130ea8b 06bf210ef Author: qhdwight Date: Sun Apr 21 12:13:19 2024 -0400 Merge remote-tracking branch 'origin/integration' into arm-practice commit 06bf210ef42cfd76c05fe65020aeb73fb90dabb1 Merge: 8bafb5059 cceaa56e8 Author: neuenfeldttj Date: Sun Apr 21 11:51:05 2024 -0400 pulled integration, added SA position fix commit cceaa56e8eb12ae4fc07e29f8f114650fa97ae17 Author: neuenfeldttj Date: Sun Apr 21 11:39:52 2024 -0400 fixed auton cameras commit 10130ea8b546850aec4f0752956a793312063647 Author: qhdwight Date: Sat Apr 20 18:56:03 2024 -0400 Fix arm controls commit e4e59a13ff725a9d3b987b80a569b00cbf162ea4 Author: umroverPerception Date: Sat Apr 20 18:55:37 2024 -0400 Updates commit 189b267db7c889df242d2ed786a894b030036008 Author: qhdwight Date: Sat Apr 20 17:29:43 2024 -0400 Update commit 671f07156e44fbad1dc923bb7523a2e9947ee6d8 Author: qhdwight Date: Sat Apr 20 17:01:30 2024 -0400 Temp fix commit ddb39f451af9991afc61ea3c1cb84ef37b22a179 Author: qhdwight Date: Sat Apr 20 16:55:46 2024 -0400 Add arm automation init commit 3837e83aabac4262ec797dd6726e9b0af5d854ce Merge: ac5fe3bde 33d430cfc Author: qhdwight Date: Sat 
Apr 20 15:45:52 2024 -0400 Merge remote-tracking branch 'origin/tjn/threejs_new' into arm-practice commit ac5fe3bde0992d2fddc64aa6383e6ce3c4cf79cb Author: qhdwight Date: Sat Apr 20 15:41:07 2024 -0400 Fix catkin profile commit c5174be310cabf943e712502039e5a354d543401 Author: qhdwight Date: Sat Apr 20 15:35:11 2024 -0400 Lower speed commit 6e357546c282f7a9935db57894e7eb01da40eaa9 Author: qhdwight Date: Sat Apr 20 15:30:55 2024 -0400 Avoid uneeded copy in usb camera, only replcae bits allocator if on gcc 13 commit c2954159503bffc7257b4a313c4b74201486d3a5 Merge: d24a6ba49 65c9dc912 Author: qhdwight Date: Fri Apr 19 00:48:53 2024 -0400 Merge remote-tracking branch 'origin/integration' into camera-id commit d24a6ba496deaf6f42dcbc75925ae914b84254a9 Merge: 40c8d4321 5eb699edf Author: qhdwight Date: Thu Apr 18 23:50:23 2024 -0400 Merge remote-tracking branch 'origin/integration' into camera-id commit 40c8d4321d6f8c25975420bfa5b0efff1192d6e2 Merge: b272f5db3 45dd19f85 Author: qhdwight Date: Thu Apr 18 23:47:30 2024 -0400 Merge remote-tracking branch 'origin/integration' into camera-id commit 33d430cfc96116d4c9cf6e16aba1b4082b74958f Author: qhdwight Date: Thu Apr 18 15:37:53 2024 -0400 Import commit 9612ab7dc091786770b6cc3bc28cd03d8a4bfd83 Merge: b26ea44aa 90d365c97 Author: qhdwight Date: Thu Apr 18 14:31:18 2024 -0400 Merge remote-tracking branch 'origin/integration' into tjn/threejs_new commit b272f5db39a94c39b55dea81ee04b64b4c6cb190 Merge: e102663ed 4319cd67b Author: qhdwight Date: Wed Apr 17 21:26:03 2024 -0400 Merge remote-tracking branch 'origin/integration' into camera-id commit b26ea44aa64031b1eb903d23413370531a9008eb Author: qhdwight Date: Wed Apr 17 21:06:33 2024 -0400 Fix arm base link commit 3a9bcbe450ff83d2c4a594c854dfcc1b5a234157 Merge: 049d49b8a 1aaff3532 Author: qhdwight Date: Wed Apr 17 20:52:43 2024 -0400 Merge remote-tracking branch 'origin/integration' into tjn/threejs_new commit 049d49b8a78584c1229a553224cf63d9ee27130f Author: qhdwight Date: Wed Apr 17 
14:35:23 2024 -0400 Update commit 2af6befd842d74ab962d4df1a2351014fcc9cfb8 Author: qhdwight Date: Wed Apr 17 13:53:22 2024 -0400 Format commit 4c875e26ad87244845787b924481372c09995476 Merge: c89e6d5b2 cee42b1bf Author: qhdwight Date: Wed Apr 17 13:53:04 2024 -0400 Merge remote-tracking branch 'origin/integration' into tjn/threejs_new commit c89e6d5b276c8743e4400a207c508838cc549954 Author: qhdwight Date: Wed Apr 17 13:39:03 2024 -0400 Update commit 2841a0b4d7523df31c06c948d8afa0b0046b51e0 Merge: 5891cb14f ee9cbed80 Author: qhdwight Date: Wed Apr 17 11:42:48 2024 -0400 Merge remote-tracking branch 'origin/integration' into tjn/threejs_new commit 5891cb14f23a5b84eab5e6080a506c9da8b4f02a Author: neuenfeldttj Date: Tue Apr 16 22:20:36 2024 -0400 reorganized arm function commit b75e2583064b580a7d733547c0089b78ecb6f261 Author: neuenfeldttj Date: Tue Apr 16 11:35:57 2024 -0400 fixed ik msg commit e102663ed82658d43e8a365eb0332cea6e1c98c8 Author: qhdwight Date: Mon Apr 15 16:15:57 2024 -0400 Add device resizing, rename some things commit e2226605fef802a51279f4e4cfd696e2144b8862 Author: qhdwight Date: Sun Apr 14 15:12:05 2024 -0400 Start working on camera stuff commit 8bafb50593a3ec25e38b1461feb155ee2f55ab45 Author: neuenfeldttj Date: Sun Apr 14 13:22:59 2024 -0400 Fixed bug when gamepad is plugged in commit da25ed1ee4b0cae2dc31a144f906d53992626f37 Author: jnnanni Date: Sun Apr 14 13:11:44 2024 -0400 Fix sending arm positions commit 59687e8982c1b722ff9180ac4af2f3e00d07145c Merge: 8fcb4dc07 268f19d64 Author: neuenfeldttj Date: Sat Apr 13 19:48:09 2024 -0400 pulled integration, ik doesnt work commit 8fcb4dc07e0a9630cdb6fe4b27c18852b2ea5c9d Author: neuenfeldttj Date: Sun Mar 17 16:49:07 2024 -0400 fk and ik working commit e4b4dacf9ccbe757732a2005ca94021febc0fafd Merge: fc979b294 695f8302c Author: neuenfeldttj Date: Sun Mar 17 14:13:13 2024 -0400 Merge remote-tracking branch 'origin/integration' into tjn/threejs_new commit fc979b294f5fad5f40383ef3fb3635e50f6bfc43 Author: 
neuenfeldttj Date: Sun Mar 17 14:11:17 2024 -0400 Temp commit 4445a32143ef9c2dc5dfa14a1f1f30c53c391498 Author: neuenfeldttj Date: Sun Mar 10 13:39:05 2024 -0400 fk works commit 3bfc63807aac75a0e2ef182c13e2082bb340551d Author: neuenfeldttj Date: Sun Feb 18 13:59:21 2024 -0500 joint a translation works. de does not commit 52166d762268bfdf9f99aa79dcd6959281d22e9c Author: neuenfeldttj Date: Sat Feb 17 22:02:07 2024 -0500 fk works --- CMakeLists.txt | 16 +- action/ArmAction.action | 3 + .../roles/build/files/profiles/ci/config.yaml | 4 +- .../build/files/profiles/debug/config.yaml | 4 +- .../build/files/profiles/release/config.yaml | 4 +- ansible/roles/build/tasks/main.yml | 1 + ansible/roles/esw/tasks/main.yml | 2 +- cmake/deps.cmake | 2 + config/esw.yaml | 15 +- config/moteus/joint_c.cfg | 4 +- config/teleop.yaml | 47 +-- launch/basestation.launch | 9 +- launch/esw_base.launch | 130 +++---- launch/simulator.launch | 5 + scripts/debug_arm_ik.py | 21 +- setup.py | 1 + src/esw/can_driver/can_driver.cpp | 4 +- src/esw/drive_bridge/main.cpp | 3 +- .../gst_websocket_streamer.cpp | 188 +++++---- .../gst_websocket_streamer.hpp | 32 +- src/esw/gst_websocket_streamer/pch.hpp | 14 +- src/preload/bits/allocator.h | 6 +- src/simulator/simulator.cpp | 3 +- src/simulator/simulator.hpp | 1 + src/simulator/simulator.render.cpp | 13 + src/simulator/simulator.sensors.cpp | 20 +- src/teleoperation/arm_automation/__init__.py | 0 .../arm_automation/arm_automation.py | 76 ++++ .../arm_controller/arm_controller.cpp | 20 +- src/teleoperation/arm_controller/pch.hpp | 1 + src/teleoperation/backend/consumers.py | 358 +++++++++--------- src/teleoperation/frontend/index.html | 2 +- src/teleoperation/frontend/package.json | 1 + .../frontend/public/meshes/arm_a.fbx | 3 + .../frontend/public/meshes/arm_b.fbx | 3 + .../frontend/public/meshes/arm_c.fbx | 3 + .../frontend/public/meshes/arm_d.fbx | 3 + .../frontend/public/meshes/arm_e.fbx | 3 + .../frontend/public/meshes/bottle.fbx | 3 + 
.../frontend/public/meshes/ground.fbx | 3 + .../frontend/public/meshes/hammer.fbx | 3 + .../public/meshes/primitives/cube.fbx | 3 + .../public/meshes/primitives/cylinder.fbx | 3 + .../public/meshes/primitives/sphere.fbx | 3 + .../frontend/public/meshes/rock.fbx | 3 + .../frontend/public/meshes/rover_chassis.fbx | 3 + .../public/meshes/rover_left_bogie.fbx | 3 + .../public/meshes/rover_left_rocker.fbx | 3 + .../public/meshes/rover_left_wheel.fbx | 3 + .../public/meshes/rover_right_bogie.fbx | 3 + .../public/meshes/rover_right_rocker.fbx | 3 + .../public/meshes/rover_right_wheel.fbx | 3 + .../frontend/src/components/ArmControls.vue | 58 ++- .../frontend/src/components/AutonTask.vue | 28 +- .../frontend/src/components/CameraFeed.vue | 103 ++--- .../frontend/src/components/DMESTask.vue | 20 +- .../frontend/src/components/Rover3D.vue | 57 +++ .../frontend/src/components/SAArmControls.vue | 58 ++- .../frontend/src/components/SATask.vue | 29 +- .../frontend/src/router/index.ts | 9 +- src/teleoperation/frontend/src/rover_three.js | 162 ++++++++ urdf/rover/rover.urdf.xacro | 2 +- 62 files changed, 1085 insertions(+), 508 deletions(-) create mode 100644 action/ArmAction.action create mode 100644 src/teleoperation/arm_automation/__init__.py create mode 100755 src/teleoperation/arm_automation/arm_automation.py create mode 100644 src/teleoperation/frontend/public/meshes/arm_a.fbx create mode 100644 src/teleoperation/frontend/public/meshes/arm_b.fbx create mode 100644 src/teleoperation/frontend/public/meshes/arm_c.fbx create mode 100644 src/teleoperation/frontend/public/meshes/arm_d.fbx create mode 100644 src/teleoperation/frontend/public/meshes/arm_e.fbx create mode 100644 src/teleoperation/frontend/public/meshes/bottle.fbx create mode 100644 src/teleoperation/frontend/public/meshes/ground.fbx create mode 100644 src/teleoperation/frontend/public/meshes/hammer.fbx create mode 100644 src/teleoperation/frontend/public/meshes/primitives/cube.fbx create mode 100644 
src/teleoperation/frontend/public/meshes/primitives/cylinder.fbx create mode 100644 src/teleoperation/frontend/public/meshes/primitives/sphere.fbx create mode 100644 src/teleoperation/frontend/public/meshes/rock.fbx create mode 100644 src/teleoperation/frontend/public/meshes/rover_chassis.fbx create mode 100644 src/teleoperation/frontend/public/meshes/rover_left_bogie.fbx create mode 100644 src/teleoperation/frontend/public/meshes/rover_left_rocker.fbx create mode 100644 src/teleoperation/frontend/public/meshes/rover_left_wheel.fbx create mode 100644 src/teleoperation/frontend/public/meshes/rover_right_bogie.fbx create mode 100644 src/teleoperation/frontend/public/meshes/rover_right_rocker.fbx create mode 100644 src/teleoperation/frontend/public/meshes/rover_right_wheel.fbx create mode 100644 src/teleoperation/frontend/src/components/Rover3D.vue create mode 100644 src/teleoperation/frontend/src/rover_three.js diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c68974f1..e02e2f0a7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -67,15 +67,19 @@ set(MROVER_ROS_DEPENDENCIES tf2 tf2_ros tf2_geometry_msgs + actionlib_msgs ) extract_filenames(msg/*.msg MROVER_MESSAGE_FILES) extract_filenames(srv/*.srv MROVER_SERVICE_FILES) +extract_filenames(action/*.action MROVER_ACTION_FILES) + set(MROVER_MESSAGE_DEPENDENCIES std_msgs sensor_msgs + actionlib_msgs ) set(MROVER_PARAMETERS @@ -98,7 +102,7 @@ if (ZED_FOUND) set(CMAKE_CUDA_STANDARD 17) set(CMAKE_CUDA_STANDARD_REQUIRED ON) set(CMAKE_CUDA_SEPARABLE_COMPILATION ON) - set(CMAKE_CUDA_FLAGS "--diag-suppress 108,68") + set(CMAKE_CUDA_FLAGS "--diag-suppress 20012") # Jetson Xavier NX/AGX has Volta 7.2 architecture # Perception Laptop (A4000, Quadro version of RTX 3080) has Ampere 8.6 architecture set(CMAKE_CUDA_ARCHITECTURES 72 86) @@ -123,6 +127,8 @@ add_message_files(FILES ${MROVER_MESSAGE_FILES}) add_service_files(FILES ${MROVER_SERVICE_FILES}) +add_action_files(DIRECTORY action FILES ${MROVER_ACTION_FILES}) + 
generate_messages(DEPENDENCIES ${MROVER_MESSAGE_DEPENDENCIES}) generate_dynamic_reconfigure_options(${MROVER_PARAMETERS}) @@ -197,9 +203,11 @@ if (CUDA_FOUND) # target_include_directories(nv_vid_codec_h265_enc SYSTEM PUBLIC deps/nvenc) endif () -mrover_add_nodelet(gst_websocket_streamer src/esw/gst_websocket_streamer/*.c* src/esw/gst_websocket_streamer src/esw/gst_websocket_streamer/pch.hpp) -mrover_nodelet_link_libraries(gst_websocket_streamer PRIVATE websocket_server ${Gst_LIBRARIES} ${GstApp_LIBRARIES}) -mrover_nodelet_include_directories(gst_websocket_streamer ${Gst_INCLUDE_DIRS} ${GstApp_INCLUDE_DIRS}) +if (Gst_FOUND AND GstApp_FOUND AND LibUsb_FOUND AND LibUdev_FOUND) + mrover_add_nodelet(gst_websocket_streamer src/esw/gst_websocket_streamer/*.c* src/esw/gst_websocket_streamer src/esw/gst_websocket_streamer/pch.hpp) + mrover_nodelet_link_libraries(gst_websocket_streamer PRIVATE websocket_server ${Gst_LIBRARIES} ${GstApp_LIBRARIES} ${LibUsb_LIBRARIES} ${LibUdev_LIBRARIES} opencv_core opencv_imgproc) + mrover_nodelet_include_directories(gst_websocket_streamer ${Gst_INCLUDE_DIRS} ${GstApp_INCLUDE_DIRS} ${LibUsb_INCLUDE_DIRS} ${LibUdev_INCLUDE_DIRS}) +endif () if (ZED_FOUND) mrover_add_nodelet(object_detector src/perception/object_detector/*.c* src/perception/object_detector src/perception/object_detector/pch.hpp) diff --git a/action/ArmAction.action b/action/ArmAction.action new file mode 100644 index 000000000..9cc4f5321 --- /dev/null +++ b/action/ArmAction.action @@ -0,0 +1,3 @@ +string name +--- +--- diff --git a/ansible/roles/build/files/profiles/ci/config.yaml b/ansible/roles/build/files/profiles/ci/config.yaml index d14216f40..c0346527b 100644 --- a/ansible/roles/build/files/profiles/ci/config.yaml +++ b/ansible/roles/build/files/profiles/ci/config.yaml @@ -5,9 +5,9 @@ catkin_make_args: [ ] cmake_args: - -DCMAKE_BUILD_TYPE=Release - -DCMAKE_C_COMPILER=clang-16 - - -DCMAKE_CXX_FLAGS=-pipe + - -DCMAKE_CXX_FLAGS=-pipe -fgnuc-version=13.1.0 - 
-DCMAKE_CXX_COMPILER=clang++-16 - - -DCMAKE_CUDA_HOST_COMPILER=clang++-16 + - -DCMAKE_CUDA_HOST_COMPILER=g++-9 - -DMROVER_IS_CI=ON - -Wno-dev devel_layout: linked diff --git a/ansible/roles/build/files/profiles/debug/config.yaml b/ansible/roles/build/files/profiles/debug/config.yaml index 71005790a..434ae5ae9 100644 --- a/ansible/roles/build/files/profiles/debug/config.yaml +++ b/ansible/roles/build/files/profiles/debug/config.yaml @@ -5,11 +5,11 @@ catkin_make_args: [ ] cmake_args: - -DCMAKE_BUILD_TYPE=Debug - -DCMAKE_C_COMPILER=clang-16 - - -DCMAKE_CXX_FLAGS=-pipe + - -DCMAKE_CXX_FLAGS=-pipe -fgnuc-version=13.1.0 - -DCMAKE_CXX_COMPILER=clang++-16 - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache - - -DCMAKE_CUDA_HOST_COMPILER=clang++-16 + - -DCMAKE_CUDA_HOST_COMPILER=g++-9 - -Wno-dev devel_layout: linked devel_space: devel diff --git a/ansible/roles/build/files/profiles/release/config.yaml b/ansible/roles/build/files/profiles/release/config.yaml index f0c29e538..ef35c5cb7 100644 --- a/ansible/roles/build/files/profiles/release/config.yaml +++ b/ansible/roles/build/files/profiles/release/config.yaml @@ -5,11 +5,11 @@ catkin_make_args: [ ] cmake_args: - -DCMAKE_BUILD_TYPE=Release - -DCMAKE_C_COMPILER=clang-16 - - -DCMAKE_CXX_FLAGS=-march=native -pipe + - -DCMAKE_CXX_FLAGS=-march=native -pipe -fgnuc-version=13.1.0 - -DCMAKE_CXX_COMPILER=clang++-16 - -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache - - -DCMAKE_CUDA_HOST_COMPILER=clang++-16 + - -DCMAKE_CUDA_HOST_COMPILER=g++-9 - -DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON - -Wno-dev devel_layout: linked diff --git a/ansible/roles/build/tasks/main.yml b/ansible/roles/build/tasks/main.yml index 6d91a9651..c2c6e022f 100644 --- a/ansible/roles/build/tasks/main.yml +++ b/ansible/roles/build/tasks/main.yml @@ -97,6 +97,7 @@ - libgstreamer1.0-dev - libgstreamer-plugins-base1.0-dev - vainfo + - x264 - name: Install Local APT Packages become: True diff --git 
a/ansible/roles/esw/tasks/main.yml b/ansible/roles/esw/tasks/main.yml index f9a81d7a5..9ae41142b 100644 --- a/ansible/roles/esw/tasks/main.yml +++ b/ansible/roles/esw/tasks/main.yml @@ -22,4 +22,4 @@ - name: Install Moteus GUI pip: - name: moteus_gui + name: moteus-gui diff --git a/cmake/deps.cmake b/cmake/deps.cmake index b73637ce8..b3bb4a520 100644 --- a/cmake/deps.cmake +++ b/cmake/deps.cmake @@ -69,3 +69,5 @@ pkg_search_module(NetLink libnl-3.0 QUIET) pkg_search_module(NetLinkRoute libnl-route-3.0 QUIET) pkg_search_module(Gst gstreamer-1.0 QUIET) pkg_search_module(GstApp gstreamer-app-1.0 QUIET) +pkg_search_module(LibUsb libusb-1.0 QUIET) +pkg_search_module(LibUdev libudev QUIET) diff --git a/config/esw.yaml b/config/esw.yaml index 43a539063..14bbfb5ad 100644 --- a/config/esw.yaml +++ b/config/esw.yaml @@ -52,7 +52,7 @@ can: id: 0x15 - name: "joint_a" bus: 1 - id: 0x20 + id: 0x20 - name: "joint_b" bus: 1 id: 0x21 @@ -73,10 +73,10 @@ can: id: 0x26 - name: "mast_gimbal_y" bus: 3 - id: 0x28 + id: 0x27 - name: "mast_gimbal_z" bus: 3 - id: 0x27 + id: 0x28 - name: "sa_x" bus: 1 id: 0x29 @@ -131,7 +131,7 @@ brushless_motors: max_velocity: 20.0 max_torque: 5.0 joint_a: - # 1236.8475 rad/meter, or 196.850393476 rev/meter, and we want a max vel of 40 meter/s. + # 1236.8475 rad/meter, or 196.850393476 rev/meter, and we want a max vel of 40 meter/s. # gear ratio is currently at 0.005080 revolutions = 1 meter min_velocity: -0.05 # this means -0.10 meters per second. 
max_velocity: 0.05 @@ -153,10 +153,11 @@ brushless_motors: max_backward_pos: 0.0 max_torque: 20.0 joint_c: - min_velocity: -0.04 # in terms of output - max_velocity: 0.04 # max output shaft speed: 5 rpm (for now) + min_velocity: -0.03 # in terms of output + max_velocity: 0.03 # max output shaft speed: 5 rpm (for now) min_position: -0.125 max_position: 0.30 # 220 degrees of motion is the entire range + max_torque: 200.0 joint_de_0: min_velocity: -5.0 max_velocity: 5.0 @@ -603,4 +604,4 @@ auton_led_driver: baud: 115200 science: - shutoff_temp: 50.0f \ No newline at end of file + shutoff_temp: 50.0f diff --git a/config/moteus/joint_c.cfg b/config/moteus/joint_c.cfg index c475f8708..761535fc1 100644 --- a/config/moteus/joint_c.cfg +++ b/config/moteus/joint_c.cfg @@ -1363,9 +1363,9 @@ servo.adc_cur_cycles 2 servo.adc_aux_cycles 47 servo.pid_dq.kp 0.060606 servo.pid_dq.ki 102.321388 -servo.pid_position.kp 14.000000 +servo.pid_position.kp 800.000000 servo.pid_position.ki 0.000000 -servo.pid_position.kd 1.450000 +servo.pid_position.kd 50.000000 servo.pid_position.iratelimit -1.000000 servo.pid_position.ilimit 0.000000 servo.pid_position.max_desired_rate 0.000000 diff --git a/config/teleop.yaml b/config/teleop.yaml index 8c05ad09a..1b8c68952 100644 --- a/config/teleop.yaml +++ b/config/teleop.yaml @@ -6,34 +6,24 @@ teleop: joint_a: multiplier: -1 slow_mode_multiplier: 0.5 - invert: True - xbox_index: 0 joint_b: multiplier: 1 slow_mode_multiplier: 1 - invert: False - xbox_index: 1 joint_c: - multiplier: -1 + multiplier: 1 slow_mode_multiplier: 0.6 - invert: False - xbox_index: 3 joint_de_pitch: - multiplier: 0.5 + multiplier: -1 slow_mode_multiplier: 1 - invert: True joint_de_roll: - multiplier: -0.5 + multiplier: 1 slow_mode_multiplier: 1 - invert: True allen_key: multiplier: 1 slow_mode_multiplier: 1 - invert: False gripper: multiplier: 1 slow_mode_multiplier: 1 - invert: True sa_controls: sa_x: @@ -67,21 +57,32 @@ teleop: enabled: true multiplier: 1 twist: - multiplier: 
0.7 + multiplier: 0.6 + tilt: + multiplier: 1 xbox_mappings: - left_js_x: 0 - left_js_y: 1 - right_js_x: 2 - right_js_y: 3 - left_trigger: 6 - right_trigger: 7 + left_x: 0 + left_y: 1 + right_x: 2 + right_y: 3 a: 0 b: 1 x: 2 y: 3 left_bumper: 4 right_bumper: 5 + left_trigger: 6 + right_trigger: 7 + back: 8 + forward: 9 + left_stick_click: 10 + right_stick_click: 11 + dpad_up: 12 + dpad_down: 13 + dpad_left: 14 + dpad_right: 15 + home: 16 joystick_mappings: left_right: 0 @@ -119,6 +120,6 @@ teleop: bar: ["/tf_static", "/rosout", "/tf"] ik_multipliers: - x: 1 - y: 1 - z: 1 \ No newline at end of file + x: 0.1 + y: 0.1 + z: 0.1 \ No newline at end of file diff --git a/launch/basestation.launch b/launch/basestation.launch index b5039bcbe..47f7dd691 100644 --- a/launch/basestation.launch +++ b/launch/basestation.launch @@ -4,14 +4,17 @@ + + + - - - + + + diff --git a/launch/esw_base.launch b/launch/esw_base.launch index ca02aaf98..a9452a46b 100644 --- a/launch/esw_base.launch +++ b/launch/esw_base.launch @@ -1,77 +1,73 @@ - + - - - - - - - + + + + + + + - + + + + + + - - - - - - + + + + + + - - - - - - + + + + + + - + + + + + + - + - - - - - + + + + + - + diff --git a/launch/simulator.launch b/launch/simulator.launch index 23230736f..6c3e96e33 100644 --- a/launch/simulator.launch +++ b/launch/simulator.launch @@ -17,6 +17,11 @@ + + + + diff --git a/scripts/debug_arm_ik.py b/scripts/debug_arm_ik.py index 4757343cc..8fb2e95e4 100755 --- a/scripts/debug_arm_ik.py +++ b/scripts/debug_arm_ik.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 import rospy -from geometry_msgs.msg import Pose, Quaternion, Point, PointStamped +from geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, PointStamped +from std_msgs.msg import Header from mrover.msg import IK if __name__ == "__main__": @@ -13,18 +14,24 @@ pub.publish( IK( - pose=Pose( - position=Point(x=0.5, y=0.5, z=0.5), - orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0), + target=PoseStamped( + 
header=Header(stamp=rospy.Time.now(), frame_id="base_link"), + pose=Pose( + position=Point(x=0.8, y=1.0, z=0.5), + orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0), + ), ) ) ) def on_clicked_point(clicked_point: PointStamped): ik = IK( - pose=Pose( - position=clicked_point.point, - orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0), + target=PoseStamped( + header=Header(stamp=rospy.Time.now(), frame_id="base_link"), + pose=Pose( + position=clicked_point.point, + orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0), + ), ) ) pub.publish(ik) diff --git a/setup.py b/setup.py index 48ba751d1..e5807a321 100644 --- a/setup.py +++ b/setup.py @@ -13,6 +13,7 @@ "navigation.failure_identification", "esw", "teleoperation.teleoperation", + "teleoperation.arm_controller", ], package_dir={"": "src"}, ) diff --git a/src/esw/can_driver/can_driver.cpp b/src/esw/can_driver/can_driver.cpp index 7b9aa81c2..6355105f8 100644 --- a/src/esw/can_driver/can_driver.cpp +++ b/src/esw/can_driver/can_driver.cpp @@ -69,7 +69,7 @@ namespace mrover { } } else { NODELET_WARN("No CAN devices specified or config was invalid. Did you forget to load the correct ROS parameters?"); - NODELET_WARN("For example before testing the devboard run: \"rosparam load config/esw_devboard.yml\""); + NODELET_WARN("For example before testing the devboard run: \"rosparam load config/esw.yml\""); } mCanNetLink = CanNetLink{mInterface}; @@ -201,7 +201,7 @@ namespace mrover { } catch (boost::system::system_error const& error) { // check if ran out of buffer space if (error.code() == boost::asio::error::no_buffer_space) { - NODELET_WARN_STREAM_THROTTLE(1, "No buffer space available to send CAN message"); + NODELET_WARN_STREAM_THROTTLE(1, "No buffer space available to send CAN message. This usually indicates an electrical problem with the bus. 
CAN will avoid sending out messages if it can not see other devices."); return; } ros::shutdown(); diff --git a/src/esw/drive_bridge/main.cpp b/src/esw/drive_bridge/main.cpp index 9e836e462..afa2092ec 100644 --- a/src/esw/drive_bridge/main.cpp +++ b/src/esw/drive_bridge/main.cpp @@ -43,7 +43,8 @@ auto main(int argc, char** argv) -> int { } void moveDrive(geometry_msgs::Twist::ConstPtr const& msg) { - // See 11.5.1 in "Controls Engineering in the FIRST Robotics Competition" for the math + // See 13.3.1.4 in "Modern Robotics" for the math + // Link: https://hades.mech.northwestern.edu/images/7/7f/MR.pdf auto forward = MetersPerSecond{msg->linear.x}; auto turn = RadiansPerSecond{msg->angular.z}; // TODO(quintin) Don't ask me to explain perfectly why we need to cancel out a meters unit in the numerator diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp b/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp index a1430549b..1033a1cc6 100644 --- a/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp +++ b/src/esw/gst_websocket_streamer/gst_websocket_streamer.cpp @@ -1,13 +1,9 @@ #include "gst_websocket_streamer.hpp" -#if defined(MROVER_IS_JETSON) -constexpr bool IS_JETSON = true; -#else -constexpr bool IS_JETSON = false; -#endif - namespace mrover { + using namespace std::string_view_literals; + template auto gstCheck(T* t) -> T* { if (!t) throw std::runtime_error{"Failed to create"}; @@ -26,7 +22,13 @@ namespace mrover { GstBuffer* buffer = gst_sample_get_buffer(sample); GstMapInfo map; gst_buffer_map(buffer, &map, GST_MAP_READ); - mStreamServer->broadcast({reinterpret_cast(map.data), map.size}); + + // Prefix the encoded chunk with metadata for the decoder on the browser side + std::vector chunk(sizeof(ChunkHeader) + map.size); + std::memcpy(chunk.data(), &mChunkHeader, sizeof(ChunkHeader)); + std::memcpy(chunk.data() + sizeof(ChunkHeader), map.data, map.size); + + mStreamServer->broadcast(chunk); gst_buffer_unmap(buffer, &map); 
gst_sample_unref(sample); @@ -38,70 +40,68 @@ namespace mrover { mMainLoop = gstCheck(g_main_loop_new(nullptr, FALSE)); + // Source std::string launch; - if (mCaptureDevice.empty()) { - if constexpr (IS_JETSON) { - // ReSharper disable once CppDFAUnreachableCode - launch = std::format( - // App source is pushed to when we get a ROS BGRA image message - // is-live prevents frames from being pushed when the pipeline is in READY - "appsrc name=imageSource is-live=true " - "! video/x-raw,format=BGRA,width={},height={},framerate=30/1 " - "! videoconvert " // Convert BGRA => I420 (YUV) for the encoder, note we are still on the CPU - "! video/x-raw,format=I420 " - "! nvvidconv " // Upload to GPU memory for the encoder - "! video/x-raw(memory:NVMM),format=I420 " - "! nvv4l2h265enc name=encoder bitrate={} iframeinterval=300 vbv-size=33333 insert-sps-pps=true control-rate=constant_bitrate profile=Main num-B-Frames=0 ratecontrol-enable=true preset-level=UltraFastPreset EnableTwopassCBR=false maxperf-enable=true " - // App sink is pulled from (getting H265 chunks) on another thread and sent to the stream server - // sync=false is needed to avoid weirdness, going from playing => ready => playing will not work otherwise - "! appsink name=streamSink sync=false", - mImageWidth, mImageHeight, mBitrate); - } else { - // ReSharper disable once CppDFAUnreachableCode - launch = std::format( - "appsrc name=imageSource is-live=true " - "! video/x-raw,format=BGRA,width={},height={},framerate=30/1 " - "! nvh265enc name=encoder " - "! 
appsink name=streamSink sync=false", - mImageWidth, mImageHeight); - } + if (mDeviceNode.empty()) { + // App source is pushed to when we get a ROS BGRA image message + // is-live prevents frames from being pushed when the pipeline is in READY + launch += "appsrc name=imageSource is-live=true "; } else { - if constexpr (IS_JETSON) { - // ReSharper disable once CppDFAUnreachableCode - - // TODO(quintin): I had to apply this patch: https://forums.developer.nvidia.com/t/macrosilicon-usb/157777/4 - // nvv4l2camerasrc only supports UYUV by default, but our cameras are YUY2 (YUYV) - - launch = std::format( + launch += std::format("v4l2src device={} ", mDeviceNode); + } + // Source format + std::string captureFormat = mDeviceNode.empty() ? "video/x-raw,format=BGRA" : mDecodeJpegFromDevice ? "image/jpeg" + : "video/x-raw,format=YUY2"; + launch += std::format("! {},width={},height={},framerate={}/1 ", + captureFormat, mImageWidth, mImageHeight, mImageFramerate); + // Source decoder and H265 encoder + if (gst_element_factory_find("nvv4l2h265enc")) { + // Most likely on the Jetson + if (captureFormat.contains("jpeg")) { + // Mostly used with USB cameras, MPEG capture uses way less USB bandwidth + launch += + // TODO(quintin): I had to apply this patch: https://forums.developer.nvidia.com/t/macrosilicon-usb/157777/4 + // nvv4l2camerasrc only supports UYUV by default, but our cameras are YUY2 (YUYV) // "nvv4l2camerasrc device={} " - - "v4l2src device={} " - // "! video/x-raw(memory:NVMM),format=YUY2,width={},height={},framerate={}/1 " - "! image/jpeg,width={},height={},framerate={}/1 " - "! nvv4l2decoder mjpeg=1 " - - "! nvvidconv " - "! video/x-raw(memory:NVMM),format=NV12 " - "! nvv4l2h265enc name=encoder bitrate={} iframeinterval=300 vbv-size=33333 insert-sps-pps=true control-rate=constant_bitrate profile=Main num-B-Frames=0 ratecontrol-enable=true preset-level=UltraFastPreset EnableTwopassCBR=false maxperf-enable=true " - "! 
appsink name=streamSink sync=false", - mCaptureDevice, mImageWidth, mImageHeight, mImageFramerate, mBitrate); + + "! nvv4l2decoder mjpeg=1 " // Hardware-accelerated JPEG decoding, output is apparently some unknown proprietary format + "! nvvidconv " // Convert from proprietary format to NV12 so the encoder understands it + "! video/x-raw(memory:NVMM),format=NV12 "; } else { - // ReSharper disable once CppDFAUnreachableCode - launch = std::format( - "v4l2src device={} " - "! video/x-raw,format=YUY2,width={},height={},framerate=30/1 " - "! videoconvert " - "! nvh265enc name=encoder " - "! appsink name=streamSink sync=false", - mCaptureDevice, mImageWidth, mImageHeight); + launch += "! videoconvert " // Convert to I420 for the encoder, note we are still on the CPU + "! video/x-raw,format=I420 " + "! nvvidconv " // Upload to GPU memory for the encoder + "! video/x-raw(memory:NVMM),format=I420 "; } + launch += std::format("! nvv4l2h265enc name=encoder bitrate={} iframeinterval=300 vbv-size=33333 insert-sps-pps=true control-rate=constant_bitrate profile=Main num-B-Frames=0 ratecontrol-enable=true preset-level=UltraFastPreset EnableTwopassCBR=false maxperf-enable=true ", + mBitrate); + mChunkHeader.codec = ChunkHeader::Codec::H265; + } else if (gst_element_factory_find("nvh265enc")) { + // For desktop/laptops with the custom NVIDIA bad gstreamer plugins built (a massive pain to do!) + if (captureFormat.contains("jpeg")) + launch += "! jpegdec "; + else + launch += "! videoconvert "; + launch += "! nvh265enc name=encoder "; + mChunkHeader.codec = ChunkHeader::Codec::H265; + } else { + // For desktop/laptops with no hardware encoder + if (captureFormat.contains("jpeg")) + launch += "! jpegdec "; + else + launch += "! videoconvert "; + launch += std::format("! 
x264enc tune=zerolatency bitrate={} name=encoder ", mBitrate); + mChunkHeader.codec = ChunkHeader::Codec::H264; } + // App sink is pulled from (getting H265 chunks) on another thread and sent to the stream server + // sync=false is needed to avoid weirdness, going from playing => ready => playing will not work otherwise + launch += "! appsink name=streamSink sync=false"; ROS_INFO_STREAM(std::format("GStreamer launch: {}", launch)); mPipeline = gstCheck(gst_parse_launch(launch.c_str(), nullptr)); - if (mCaptureDevice.empty()) mImageSource = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "imageSource")); + if (mDeviceNode.empty()) mImageSource = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "imageSource")); mStreamSink = gstCheck(gst_bin_get_by_name(GST_BIN(mPipeline), "streamSink")); if (gst_element_set_state(mPipeline, GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) @@ -127,14 +127,22 @@ namespace mrover { if (mStreamServer->clientCount() == 0) return; if (msg->encoding != sensor_msgs::image_encodings::BGRA8) throw std::runtime_error{"Unsupported encoding"}; - if (msg->width != mImageWidth || msg->height != mImageHeight) throw std::runtime_error{"Unsupported resolution"}; + + cv::Size receivedSize{static_cast(msg->width), static_cast(msg->height)}; + cv::Mat bgraFrame{receivedSize, CV_8UC4, const_cast(msg->data.data()), msg->step}; + + if (cv::Size targetSize{static_cast(mImageWidth), static_cast(mImageHeight)}; + receivedSize != targetSize) { + ROS_WARN_ONCE("Image size does not match pipeline app source size, will resize"); + resize(bgraFrame, bgraFrame, targetSize); + } // "step" is the number of bytes (NOT pixels) in an image row - std::size_t size = msg->step * msg->height; + std::size_t size = bgraFrame.step * bgraFrame.rows; GstBuffer* buffer = gstCheck(gst_buffer_new_allocate(nullptr, size, nullptr)); GstMapInfo info; gst_buffer_map(buffer, &info, GST_MAP_WRITE); - std::memcpy(info.data, msg->data.data(), size); + std::memcpy(info.data, bgraFrame.data, 
size); gst_buffer_unmap(buffer, &info); gst_app_src_push_buffer(GST_APP_SRC(mImageSource), buffer); @@ -145,12 +153,56 @@ namespace mrover { } } + auto widthAndHeightToResolution(std::uint32_t width, std::uint32_t height) -> ChunkHeader::Resolution { + if (width == 640 && height == 480) return ChunkHeader::Resolution::EGA; + if (width == 1280 && height == 720) return ChunkHeader::Resolution::HD; + if (width == 1920 && height == 1080) return ChunkHeader::Resolution::FHD; + throw std::runtime_error{"Unsupported resolution"}; + } + + auto findDeviceNode(std::string_view devicePath) -> std::string { + udev* udevContext = udev_new(); + if (!udevContext) throw std::runtime_error{"Failed to initialize udev"}; + + udev_enumerate* enumerate = udev_enumerate_new(udevContext); + if (!enumerate) throw std::runtime_error{"Failed to create udev enumeration"}; + + udev_enumerate_add_match_subsystem(enumerate, "video4linux"); + udev_enumerate_scan_devices(enumerate); + + udev_list_entry* devices = udev_enumerate_get_list_entry(enumerate); + if (!devices) throw std::runtime_error{"Failed to get udev device list"}; + + udev_device* device{}; + udev_list_entry* entry; + udev_list_entry_foreach(entry, devices) { + device = udev_device_new_from_syspath(udevContext, udev_list_entry_get_name(entry)); + + if (udev_device_get_devpath(device) != devicePath) continue; + + if (!device) throw std::runtime_error{"Failed to get udev device"}; + + break; + } + if (!device) throw std::runtime_error{"Failed to find udev device"}; + + std::string deviceNode = udev_device_get_devnode(device); + + udev_device_unref(device); + udev_enumerate_unref(enumerate); + udev_unref(udevContext); + + return deviceNode; + } + auto GstWebsocketStreamerNodelet::onInit() -> void { try { mNh = getMTNodeHandle(); mPnh = getMTPrivateNodeHandle(); - mCaptureDevice = mPnh.param("device", ""); + mDeviceNode = mPnh.param("dev_node", ""); + mDevicePath = mPnh.param("dev_path", ""); + mDecodeJpegFromDevice = 
mPnh.param("decode_jpeg_from_device", true); mImageTopic = mPnh.param("image_topic", "image"); mImageWidth = mPnh.param("width", 640); mImageHeight = mPnh.param("height", 480); @@ -159,6 +211,10 @@ namespace mrover { auto port = mPnh.param("port", 8081); mBitrate = mPnh.param("bitrate", 2000000); + mChunkHeader.resolution = widthAndHeightToResolution(mImageWidth, mImageHeight); + + if (!mDevicePath.empty()) mDeviceNode = findDeviceNode(mDevicePath); + gst_init(nullptr, nullptr); initPipeline(); @@ -193,12 +249,12 @@ namespace mrover { ROS_INFO("Stopped GStreamer pipeline"); }); - if (mCaptureDevice.empty()) { + if (mDeviceNode.empty()) { mImageSubscriber = mNh.subscribe(mImageTopic, 1, &GstWebsocketStreamerNodelet::imageCallback, this); } } catch (std::exception const& e) { - ROS_ERROR_STREAM(std::format("Exception initializing NVIDIA GST H265 Encoder: {}", e.what())); + ROS_ERROR_STREAM(std::format("Exception initializing gstreamer websocket streamer: {}", e.what())); ros::requestShutdown(); } } diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp b/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp index c4950f62c..69520febe 100644 --- a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp +++ b/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp @@ -2,14 +2,42 @@ #include "pch.hpp" +// Uses gstreamer to encode and stream video over a websocket +// The input can be a ROS BGRA image topic or a USB device +// Hardware accelerated is used when possible (with the Jetson or NVIDIA GPUs) +// Run "export GST_DEBUG=2" to debug gstreamer issues + namespace mrover { + struct ChunkHeader { + enum struct Resolution : std::uint8_t { + EGA, // 640x480 + HD, // 1280x720 + FHD, // 1920x1080 + } resolution; + enum struct Codec : std::uint8_t { + H265, + H264, + } codec; + }; + class GstWebsocketStreamerNodelet final : public nodelet::Nodelet { ros::NodeHandle mNh, mPnh; - std::string mCaptureDevice; + // For example, /dev/video0 + // These 
device paths are not guaranteed to stay the same between reboots + // Prefer sys path for non-debugging purposes + std::string mDeviceNode; + bool mDecodeJpegFromDevice{}; // Uses less USB hub bandwidth, which is limited since we are using 2.0 std::string mImageTopic; + // To find the sys path: + // 1) Disconnect all cameras + // 2) Confirm there are no /dev/video* devices + // 3) Connect the camera you want to use + // 4) Run "ls /dev/video*" to verify the device is connected + // 5) Run "udevadm info -q path -n /dev/video0" to get the sys path + std::string mDevicePath; std::uint64_t mBitrate{}; std::uint32_t mImageWidth{}, mImageHeight{}, mImageFramerate{}; @@ -22,6 +50,8 @@ namespace mrover { std::thread mMainLoopThread; std::thread mStreamSinkThread; + ChunkHeader mChunkHeader{}; + auto onInit() -> void override; auto pullStreamSamplesLoop() -> void; diff --git a/src/esw/gst_websocket_streamer/gst_websocket_streamer.hpp b/src/esw/gst_websocket_streamer/pch.hpp index 2c224a3ec..2769438ad 100644 --- a/src/esw/gst_websocket_streamer/pch.hpp +++ b/src/esw/gst_websocket_streamer/pch.hpp @@ -1,20 +1,26 @@ #pragma once -#include #include #include +#include #include -#include -#include #include -#include #include #include #include +#include +#include +#include #include #include #include +#include + +#include + +#include + #include diff --git a/src/preload/bits/allocator.h b/src/preload/bits/allocator.h index 815b7f850..c33fb23b3 100644 --- a/src/preload/bits/allocator.h +++ b/src/preload/bits/allocator.h @@ -6,9 +6,7 @@ // C++ should have never broke this backwards compatibility! // How silly! 
-#if defined(__linux__) && defined(__GNUC__) - -#if (__GNUC__ == 13) +#if defined(__linux__) && defined(__GLIBCXX__) && !defined(__CUDACC__) // Allocators -*- C++ -*- @@ -292,5 +290,3 @@ namespace std _GLIBCXX_VISIBILITY(default) { #include_next #endif - -#endif diff --git a/src/simulator/simulator.cpp b/src/simulator/simulator.cpp index 499e0acb9..0fbc4639a 100644 --- a/src/simulator/simulator.cpp +++ b/src/simulator/simulator.cpp @@ -1,4 +1,5 @@ #include "simulator.hpp" +#include namespace mrover { @@ -23,7 +24,7 @@ namespace mrover { mMotorStatusPub = mNh.advertise("/drive_status", 1); mDriveControllerStatePub = mNh.advertise("/drive_controller_data", 1); mArmControllerStatePub = mNh.advertise("/arm_controller_data", 1); - mArmJointStatePub = mNh.advertise("/arm_joint_states", 1); + mArmJointStatePub = mNh.advertise("/arm_joint_data", 1); mIkTargetPub = mNh.advertise("/arm_ik", 1); diff --git a/src/simulator/simulator.hpp b/src/simulator/simulator.hpp index 3e12b9485..ddd4f0f86 100644 --- a/src/simulator/simulator.hpp +++ b/src/simulator/simulator.hpp @@ -4,6 +4,7 @@ #include "glfw_pointer.hpp" #include "wgpu_objects.hpp" +#include using namespace std::literals; diff --git a/src/simulator/simulator.render.cpp b/src/simulator/simulator.render.cpp index ce2bf2247..9f725cc11 100644 --- a/src/simulator/simulator.render.cpp +++ b/src/simulator/simulator.render.cpp @@ -208,6 +208,19 @@ namespace mrover { mWindow = GlfwPointer{w, h, WINDOW_NAME, nullptr, nullptr}; NODELET_INFO_STREAM(std::format("Created window of size: {}x{}", w, h)); + if (cv::Mat logo = imread(std::filesystem::path{std::source_location::current().file_name()}.parent_path() / "mrover_logo.png", cv::IMREAD_UNCHANGED); + logo.type() == CV_8UC4) { + cvtColor(logo, logo, cv::COLOR_BGRA2RGBA); + GLFWimage logoImage{ + .width = logo.cols, + .height = logo.rows, + .pixels = logo.data, + }; + glfwSetWindowIcon(mWindow.get(), 1, &logoImage); + } else { + ROS_WARN_STREAM("Failed to load logo image"); + } + if 
(glfwRawMouseMotionSupported()) glfwSetInputMode(mWindow.get(), GLFW_RAW_MOUSE_MOTION, GLFW_TRUE); glfwSetWindowUserPointer(mWindow.get(), this); diff --git a/src/simulator/simulator.sensors.cpp b/src/simulator/simulator.sensors.cpp index caf0bbf53..a6b42f703 100644 --- a/src/simulator/simulator.sensors.cpp +++ b/src/simulator/simulator.sensors.cpp @@ -1,4 +1,5 @@ #include "simulator.hpp" +#include namespace mrover { @@ -249,11 +250,19 @@ namespace mrover { mDriveControllerStatePub.publish(driveControllerState); ControllerState armControllerState; + sensor_msgs::JointState armJointState; + std::vector zeros = {0, 0, 0, 0, 0, 0}; + armJointState.header.stamp = ros::Time::now(); for (auto& linkName: {"arm_a_link", "arm_b_link", "arm_c_link", "arm_d_link", "arm_e_link"}) { armControllerState.name.emplace_back(armMsgToUrdf.backward(linkName).value()); armControllerState.state.emplace_back("Armed"); armControllerState.error.emplace_back("None"); + armJointState.name.emplace_back(armMsgToUrdf.backward(linkName).value()); + armJointState.position.emplace_back(rover.physics->getJointPos(rover.linkNameToMeta.at(linkName).index)); + armJointState.velocity.emplace_back(rover.physics->getJointVel(rover.linkNameToMeta.at(linkName).index)); + armJointState.effort.emplace_back(rover.physics->getJointTorque(rover.linkNameToMeta.at(linkName).index)); + std::uint8_t limitSwitches = 0b000; if (auto limits = rover.model.getLink(linkName)->parent_joint->limits) { double joinPosition = rover.physics->getJointPos(rover.linkNameToMeta.at(linkName).index); @@ -264,16 +273,7 @@ namespace mrover { armControllerState.limit_hit.push_back(limitSwitches); } mArmControllerStatePub.publish(armControllerState); - - sensor_msgs::JointState jointState; - jointState.header.stamp = ros::Time::now(); - for (auto& linkName: {"arm_a_link", "arm_b_link", "arm_c_link", "arm_d_link", "arm_e_link"}) { - jointState.name.push_back(armMsgToUrdf.backward(linkName).value()); - 
jointState.position.push_back(rover.physics->getJointPos(rover.linkNameToMeta.at(linkName).index)); - jointState.velocity.push_back(rover.physics->getJointVel(rover.linkNameToMeta.at(linkName).index)); - jointState.effort.push_back(rover.physics->getJointTorque(rover.linkNameToMeta.at(linkName).index)); - } - mArmJointStatePub.publish(jointState); + mArmJointStatePub.publish(armJointState); } } diff --git a/src/teleoperation/arm_automation/__init__.py b/src/teleoperation/arm_automation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/teleoperation/arm_automation/arm_automation.py b/src/teleoperation/arm_automation/arm_automation.py new file mode 100755 index 000000000..4ce34b92a --- /dev/null +++ b/src/teleoperation/arm_automation/arm_automation.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +import rospy + +import actionlib + +from mrover.msg import ArmActionAction, ArmActionGoal, ArmActionResult, Position + +from sensor_msgs.msg import JointState + +import numpy as np + + +def arm_automation() -> None: + rospy.init_node("arm_automation") + + server = None + joint_state = None + + pos_pub = rospy.Publisher("arm_position_cmd", Position, queue_size=1) + + def joint_state_callback(msg: JointState): + nonlocal joint_state + joint_state = msg + + rospy.Subscriber("arm_joint_data", JointState, joint_state_callback) + + def execute_callback(goal: ArmActionGoal): + if joint_state is None: + rospy.logerr("No joint state data available") + server.set_aborted(ArmActionResult()) + return + + if goal.name == "de_home": + target_names = ["joint_de_pitch", "joint_de_roll"] + target_positions = [joint_state.position[joint_state.name.index("joint_de_pitch")], np.pi / 8] + rospy.loginfo(f"Moving to {target_positions} for {target_names}") + else: + rospy.logerr("Invalid goal name") + server.set_aborted(ArmActionResult()) + return + + rate = rospy.Rate(20) + while not rospy.is_shutdown(): + if server.is_preempt_requested(): + server.set_preempted() + 
rospy.loginfo("Preempted") + server.set_aborted(ArmActionResult()) + return + + pos_pub.publish(Position(names=target_names, positions=target_positions)) + + feedback = [ + joint_state.position[joint_state.name.index("joint_de_pitch")], + joint_state.position[joint_state.name.index("joint_de_roll")], + ] + + if np.allclose(target_positions, feedback, atol=0.1): + rospy.loginfo("Reached target") + break + + rate.sleep() + + server.set_succeeded(ArmActionResult()) + + rospy.sleep(1) + + server = actionlib.SimpleActionServer("arm_action", ArmActionAction, execute_cb=execute_callback, auto_start=False) + server.start() + + +if __name__ == "__main__": + try: + arm_automation() + except rospy.ROSInterruptException: + pass diff --git a/src/teleoperation/arm_controller/arm_controller.cpp b/src/teleoperation/arm_controller/arm_controller.cpp index b0d7d23af..dbbe35771 100644 --- a/src/teleoperation/arm_controller/arm_controller.cpp +++ b/src/teleoperation/arm_controller/arm_controller.cpp @@ -16,19 +16,19 @@ namespace mrover { Position positions; positions.names = {"joint_a", "joint_b", "joint_c", "joint_de_pitch", "joint_de_roll"}; positions.positions.resize(positions.names.size()); - SE3d target_frame_to_arm_b_static; + SE3d targetFrameToArmBaseLink; try { - target_frame_to_arm_b_static = SE3Conversions::fromTfTree(mTfBuffer, ik_target.target.header.frame_id, "arm_base_link"); - } catch (...) 
{ - ROS_WARN_THROTTLE(1, "Failed to resolve information about target frame"); + targetFrameToArmBaseLink = SE3Conversions::fromTfTree(mTfBuffer, ik_target.target.header.frame_id, "arm_base_link"); + } catch (tf2::TransformException const& exception) { + ROS_WARN_STREAM_THROTTLE(1, std::format("Failed to get transform from {} to arm_base_link: {}", ik_target.target.header.frame_id, exception.what())); return; } - Eigen::Vector4d target{ik_target.target.pose.position.x, ik_target.target.pose.position.y, ik_target.target.pose.position.z, 1}; - Eigen::Vector4d target_in_arm_b_static = target_frame_to_arm_b_static.transform() * target; - double x = target_in_arm_b_static.x() - END_EFFECTOR_LENGTH; // shift back by the length of the end effector - double z = target_in_arm_b_static.z(); - double y = target_in_arm_b_static.y(); - SE3Conversions::pushToTfTree(mTfBroadcaster, "arm_target", "arm_base_link", target_frame_to_arm_b_static); + SE3d endEffectorInTarget{{ik_target.target.pose.position.x, ik_target.target.pose.position.y, ik_target.target.pose.position.z}, SO3d::Identity()}; + SE3d endEffectorInArmBaseLink = targetFrameToArmBaseLink * endEffectorInTarget; + double x = endEffectorInArmBaseLink.translation().x() - END_EFFECTOR_LENGTH; // shift back by the length of the end effector + double y = endEffectorInArmBaseLink.translation().y(); + double z = endEffectorInArmBaseLink.translation().z(); + SE3Conversions::pushToTfTree(mTfBroadcaster, "arm_target", "arm_base_link", endEffectorInArmBaseLink); double gamma = 0; double x3 = x - LINK_DE * std::cos(gamma); diff --git a/src/teleoperation/arm_controller/pch.hpp b/src/teleoperation/arm_controller/pch.hpp index 2f68bad08..104f5832c 100644 --- a/src/teleoperation/arm_controller/pch.hpp +++ b/src/teleoperation/arm_controller/pch.hpp @@ -2,6 +2,7 @@ #include #include +#include #include diff --git a/src/teleoperation/backend/consumers.py b/src/teleoperation/backend/consumers.py index 346015fae..cf6b24347 100644 --- 
a/src/teleoperation/backend/consumers.py +++ b/src/teleoperation/backend/consumers.py @@ -3,7 +3,7 @@ import os import threading from datetime import datetime -from math import copysign, pi +from math import copysign, pi, isfinite import pytz from channels.generic.websocket import JsonWebsocketConsumer @@ -11,10 +11,14 @@ import rospy import tf2_ros from backend.models import AutonWaypoint, BasicWaypoint -from geometry_msgs.msg import Twist, Pose, Point, Quaternion, Vector3 +from geometry_msgs.msg import Twist, Pose, Point, Quaternion, Vector3, PoseStamped + +import actionlib # from cv_bridge import CvBridge from mrover.msg import ( + ArmActionAction, + ArmActionGoal, PDLB, ControllerState, GPSWaypoint, @@ -33,8 +37,8 @@ HeaterData, ) from mrover.srv import EnableAuton, AdjustMotor, ChangeCameras, CapturePanorama -from sensor_msgs.msg import NavSatFix, Temperature, RelativeHumidity -from std_msgs.msg import String +from sensor_msgs.msg import NavSatFix, Temperature, RelativeHumidity, JointState +from std_msgs.msg import String, Header from std_srvs.srv import SetBool, Trigger from tf.transformations import euler_from_quaternion from util.SE3 import SE3 @@ -98,6 +102,8 @@ def connect(self): self.arm_moteus_sub = rospy.Subscriber( "/arm_controller_data", ControllerState, self.arm_controller_callback ) + self.arm_joint_sub = rospy.Subscriber("/arm_joint_data", JointState, self.arm_joint_callback) + self.sa_joint_sub = rospy.Subscriber("/sa_joint_data", JointState, self.sa_joint_callback) self.drive_moteus_sub = rospy.Subscriber( "/drive_controller_data", ControllerState, self.drive_controller_callback ) @@ -130,27 +136,11 @@ def connect(self): self.capture_panorama_srv = rospy.ServiceProxy("capture_panorama", CapturePanorama) self.heater_auto_shutoff_srv = rospy.ServiceProxy("science_change_heater_auto_shutoff_state", SetBool) - # ROS Parameters - self.mappings = rospy.get_param("teleop/joystick_mappings") - self.drive_config = rospy.get_param("teleop/drive_controls") 
- self.max_wheel_speed = rospy.get_param("rover/max_speed") - self.wheel_radius = rospy.get_param("wheel/radius") - self.rover_width = rospy.get_param("rover/width") - self.max_angular_speed = self.max_wheel_speed / (self.rover_width / 2) - self.ra_config = rospy.get_param("teleop/ra_controls") - self.ik_names = rospy.get_param("teleop/ik_multipliers") - self.RA_NAMES = rospy.get_param("teleop/ra_names") - self.brushless_motors = rospy.get_param("brushless_motors/controllers") - self.brushed_motors = rospy.get_param("brushed_motors/controllers") - self.xbox_mappings = rospy.get_param("teleop/xbox_mappings") - self.sa_config = rospy.get_param("teleop/sa_controls") - self.tf_buffer = tf2_ros.Buffer() self.tf_listener = tf2_ros.TransformListener(self.tf_buffer) self.flight_thread = threading.Thread(target=self.flight_attitude_listener) self.flight_thread.start() - - except rospy.ROSException as e: + except Exception as e: rospy.logerr(e) def disconnect(self, close_code): @@ -197,8 +187,8 @@ def receive(self, text_data): self.send_auton_command(message) elif message["type"] == "teleop_enabled": self.send_teleop_enabled(message) - elif message["type"] == "auton_tfclient": - self.auton_bearing() + elif message["type"] == "bearing": + self.bearing() elif message["type"] == "mast_gimbal": self.mast_gimbal(message) elif message["type"] == "max_streams": @@ -230,16 +220,18 @@ def receive(self, text_data): except Exception as e: rospy.logerr(e) + @staticmethod def filter_xbox_axis( - self, - value: int, + value: float, deadzone_threshold: float = DEFAULT_ARM_DEADZONE, quad_control: bool = False, ) -> float: - deadzoned_val = deadzone(value, deadzone_threshold) - return quadratic(deadzoned_val) if quad_control else deadzoned_val + value = deadzone(value, deadzone_threshold) + if quad_control: + value = quadratic(value) + return value - def filter_xbox_button(self, button_array: "List[int]", pos_button: str, neg_button: str) -> float: + def filter_xbox_button(self, 
button_array: list[float], pos_button: str, neg_button: str) -> float: """ Applies various filtering functions to an axis for controlling the arm :return: Return -1, 0, or 1 depending on what buttons are being pressed @@ -268,158 +260,156 @@ def to_velocity(self, input: int, joint_name: str, brushless: bool = True) -> fl / 2 ) - def handle_controls_message(self, msg): - CACHE = ["cache"] - SA_NAMES = ["sa_x", "sa_y", "sa_z", "sampler", "sensor_actuator"] - RA_NAMES = self.RA_NAMES - ra_slow_mode = False - raw_left_trigger = msg["buttons"][self.xbox_mappings["left_trigger"]] - left_trigger = raw_left_trigger if raw_left_trigger > 0 else 0 - raw_right_trigger = msg["buttons"][self.xbox_mappings["right_trigger"]] - right_trigger = raw_right_trigger if raw_right_trigger > 0 else 0 - arm_pubs = [self.arm_position_cmd_pub, self.arm_velocity_cmd_pub, self.arm_throttle_cmd_pub, self.arm_ik_pub] - sa_pubs = [self.sa_position_cmd_pub, self.sa_velocity_cmd_pub, self.sa_throttle_cmd_pub] - cache_pubs = [self.cache_position_cmd_pub, self.cache_velocity_cmd_pub, self.cache_throttle_cmd_pub] - publishers = [] - controls_names = [] - if msg["type"] == "cache_values": - controls_names = CACHE - publishers = cache_pubs - elif msg["type"] == "arm_values": - controls_names = RA_NAMES - publishers = arm_pubs - elif msg["type"] == "sa_arm_values": - controls_names = SA_NAMES - publishers = sa_pubs - - if msg["arm_mode"] == "ik": - ee_in_map = SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_e_link") - - left_trigger = self.filter_xbox_axis(msg["axes"][self.xbox_mappings["left_trigger"]]) - if left_trigger < 0: - left_trigger = 0 - - right_trigger = self.filter_xbox_axis(msg["axes"][self.xbox_mappings["right_trigger"]]) - if right_trigger < 0: - right_trigger = 0 - ee_in_map.position[0] += ( - self.ik_names["x"] * self.filter_xbox_axis(msg["axes"][self.xbox_mappings["left_js_x"]]), - ) - ee_in_map.position[1] += ( - self.ik_names["y"] * 
self.filter_xbox_axis(msg["axes"][self.xbox_mappings["left_js_y"]]), + def publish_ik(self, axes: list[float], buttons: list[float]) -> None: + ee_in_map = SE3.from_tf_tree(self.tf_buffer, "base_link", "arm_d_link") + + ee_in_map.position[0] += self.ik_names["x"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_x"]]) + ee_in_map.position[1] += self.ik_names["y"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_js_y"]]) + ee_in_map.position[2] += self.ik_names["z"] * self.filter_xbox_button(buttons, "right_trigger", "left_trigger") + + self.send( + text_data=json.dumps( + { + "type": "ik", + "target": { + "position": ee_in_map.position.tolist(), + "quaternion": ee_in_map.rotation.quaternion.tolist(), + }, + } ) - ee_in_map.position[2] -= self.ik_names["z"] * left_trigger + self.ik_names["z"] * right_trigger + ) - arm_ik_cmd = IK( - pose=Pose( - position=Point(*ee_in_map.position), - orientation=Quaternion(*ee_in_map.rotation.quaternion), + self.arm_ik_pub.publish( + IK( + target=PoseStamped( + header=Header(stamp=rospy.Time.now(), frame_id="base_link"), + pose=Pose( + position=Point(*ee_in_map.position), + orientation=Quaternion(*ee_in_map.rotation.quaternion), + ), ) ) - publishers[3].publish(arm_ik_cmd) - - elif msg["arm_mode"] == "position": - position_names = controls_names - if msg["type"] == "arm_values": - position_names = ["joint_b", "joint_c", "joint_de_pitch", "joint_de_roll"] - position_cmd = Position( - names=position_names, - positions=msg["positions"], - ) - publishers[0].publish(position_cmd) - - elif msg["arm_mode"] == "velocity": - velocity_cmd = Velocity() - velocity_cmd.names = controls_names - if msg["type"] == "cache_values": - velocity_cmd.velocities = [ - self.sa_config["cache"]["multiplier"] - * self.filter_xbox_button(msg["buttons"], "right_bumper", "left_bumper") - ] - elif msg["type"] == "sa_arm_values": - velocity_cmd.velocities = [ - self.to_velocity( - 
self.filter_xbox_axis(msg["axes"][self.sa_config["sa_x"]["xbox_index"]]), "sa_x", False - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_y"]["xbox_index"]]), "sa_y", False - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_z"]["xbox_index"]]), "sa_z", True - ), - self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), - self.sa_config["sensor_actuator"]["multiplier"] - * self.filter_xbox_button(msg["buttons"], "right_bumper", "left_bumper"), - ] - elif msg["type"] == "arm_values": - velocity_cmd.velocities = [ - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_a"]["xbox_index"]]), "joint_a" - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_b"]["xbox_index"]]), "joint_b", False - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_c"]["xbox_index"]]), "joint_c" - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_de_pitch"]["xbox_index"]]), "joint_de_0" - ), - self.to_velocity( - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_de_roll"]["xbox_index"]]), "joint_de_1" - ), - self.ra_config["allen_key"]["multiplier"] * self.filter_xbox_button(msg["buttons"], "y", "a"), - self.ra_config["gripper"]["multiplier"] * self.filter_xbox_button(msg["buttons"], "b", "x"), - ] - publishers[1].publish(velocity_cmd) - - elif msg["arm_mode"] == "throttle": - throttle_cmd = Throttle() - throttle_cmd.names = controls_names - if msg["type"] == "cache_values": - throttle_cmd.throttles = [ - self.sa_config["cache"]["multiplier"] - * self.filter_xbox_button(msg["buttons"], "right_bumper", "left_bumper") - ] - elif msg["type"] == "arm_values": - # d_pad_x = msg["axes"][self.xbox_mappings["d_pad_x"]] - # if d_pad_x > 0.5: - # ra_slow_mode = True - # elif d_pad_x < -0.5: - # ra_slow_mode = False - - throttle_cmd.throttles = [ - 
self.filter_xbox_axis(msg["axes"][self.ra_config["joint_a"]["xbox_index"]]), - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_b"]["xbox_index"]]), - self.filter_xbox_axis(msg["axes"][self.ra_config["joint_c"]["xbox_index"]]), - self.filter_xbox_button(msg["buttons"], "right_trigger", "left_trigger"), - self.filter_xbox_button(msg["buttons"], "left_bumper", "right_bumper"), - self.ra_config["allen_key"]["multiplier"] * self.filter_xbox_button(msg["buttons"], "y", "a"), - self.ra_config["gripper"]["multiplier"] * self.filter_xbox_button(msg["buttons"], "x", "b"), - ] - - for i, name in enumerate(self.RA_NAMES): - if ra_slow_mode: - throttle_cmd.throttles[i] *= self.ra_config[name]["slow_mode_multiplier"] - if self.ra_config[name]["invert"]: - throttle_cmd.throttles[i] *= -1 - elif msg["type"] == "sa_arm_values": - throttle_cmd.throttles = [ - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_x"]["xbox_index"]]), - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_y"]["xbox_index"]]), - self.filter_xbox_axis(msg["axes"][self.sa_config["sa_z"]["xbox_index"]]), - self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), - self.sa_config["sensor_actuator"]["multiplier"] - * self.filter_xbox_button(msg["buttons"], "right_bumper", "left_bumper"), - ] - - fast_mode_activated = msg["buttons"][self.xbox_mappings["a"]] or msg["buttons"][self.xbox_mappings["b"]] - if not fast_mode_activated: - for i, name in enumerate(SA_NAMES): - # When going up (vel < 0) with SA joint 2, we DON'T want slow mode. 
- if not (name == "sa_z" and throttle_cmd.throttles[i] < 0): - throttle_cmd.throttles[i] *= self.sa_config[name]["slow_mode_multiplier"] - publishers[2].publish(throttle_cmd) + ) + + def publish_position(self, type, names, positions): + position_cmd = Position( + names=names, + positions=positions, + ) + if type == "arm_values": + self.arm_position_cmd_pub.publish(position_cmd) + elif type == "sa_arm_values": + self.sa_position_cmd_pub.publish(position_cmd) + elif type == "cache_values": + self.cache_position_cmd_pub.publish(position_cmd) + + def publish_velocity(self, type, names, axes, buttons): + left_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["left_trigger"]]) + right_trigger = self.filter_xbox_axis(axes[self.xbox_mappings["right_trigger"]]) + velocity_cmd = Velocity() + velocity_cmd.names = names + if type == "cache_values": + velocity_cmd.velocities = [ + self.sa_config["cache"]["multiplier"] * self.filter_xbox_button(buttons, "right_bumper", "left_bumper") + ] + self.cache_velocity_cmd_pub.publish(velocity_cmd) + elif type == "sa_arm_values": + velocity_cmd.velocities = [ + self.to_velocity(self.filter_xbox_axis(axes[self.sa_config["sa_x"]["xbox_index"]]), "sa_x", False), + self.to_velocity(self.filter_xbox_axis(axes[self.sa_config["sa_y"]["xbox_index"]]), "sa_y", False), + self.to_velocity(self.filter_xbox_axis(axes[self.sa_config["sa_z"]["xbox_index"]]), "sa_z", True), + self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), + self.sa_config["sensor_actuator"]["multiplier"] + * self.filter_xbox_button(buttons, "right_bumper", "left_bumper"), + ] + self.sa_velocity_cmd_pub.publish(velocity_cmd) + elif type == "arm_values": + velocity_cmd.velocities = [ + self.to_velocity(self.filter_xbox_axis(axes[self.ra_config["joint_a"]["xbox_index"]]), "joint_a"), + self.to_velocity( + self.filter_xbox_axis(axes[self.ra_config["joint_b"]["xbox_index"]]), "joint_b", False + ), + 
self.to_velocity(self.filter_xbox_axis(axes[self.ra_config["joint_c"]["xbox_index"]]), "joint_c"), + self.to_velocity( + self.filter_xbox_axis(axes[self.ra_config["joint_de_pitch"]["xbox_index"]]), "joint_de_0" + ), + self.to_velocity( + self.filter_xbox_axis(axes[self.ra_config["joint_de_roll"]["xbox_index"]]), "joint_de_1" + ), + self.ra_config["allen_key"]["multiplier"] * self.filter_xbox_button(buttons, "y", "a"), + self.ra_config["gripper"]["multiplier"] * self.filter_xbox_button(buttons, "b", "x"), + ] + self.arm_velocity_cmd_pub.publish(velocity_cmd) + + def publish_throttle(self, type, names, axes, buttons): + left_trigger = self.filter_xbox_axis(buttons[self.xbox_mappings["left_trigger"]]) + right_trigger = self.filter_xbox_axis(buttons[self.xbox_mappings["right_trigger"]]) + throttle_cmd = Throttle() + throttle_cmd.names = names + if type == "cache_values": + throttle_cmd.throttles = [ + self.sa_config["cache"]["multiplier"] * self.filter_xbox_button(buttons, "right_bumper", "left_bumper") + ] + self.cache_throttle_cmd_pub.publish(throttle_cmd) + elif type == "arm_values": + throttle_cmd.throttles = [ + self.ra_config["joint_a"]["multiplier"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_x"]]), + self.ra_config["joint_b"]["multiplier"] * self.filter_xbox_axis(axes[self.xbox_mappings["left_y"]]), + self.ra_config["joint_c"]["multiplier"] + * quadratic(self.filter_xbox_axis(axes[self.xbox_mappings["right_y"]])), + self.ra_config["joint_de_pitch"]["multiplier"] + * self.filter_xbox_axis( + buttons[self.xbox_mappings["right_trigger"]] - buttons[self.xbox_mappings["left_trigger"]] + ), + self.ra_config["joint_de_roll"]["multiplier"] + * self.filter_xbox_axis( + buttons[self.xbox_mappings["right_bumper"]] - buttons[self.xbox_mappings["left_bumper"]] + ), + self.ra_config["allen_key"]["multiplier"] * self.filter_xbox_button(buttons, "y", "a"), + self.ra_config["gripper"]["multiplier"] * self.filter_xbox_button(buttons, "b", "x"), + ] + 
self.arm_throttle_cmd_pub.publish(throttle_cmd) + elif type == "sa_arm_values": + throttle_cmd.throttles = [ + self.filter_xbox_axis(axes[self.sa_config["sa_x"]["xbox_index"]]), + self.filter_xbox_axis(axes[self.sa_config["sa_y"]["xbox_index"]]), + self.filter_xbox_axis(axes[self.sa_config["sa_z"]["xbox_index"]]), + self.sa_config["sampler"]["multiplier"] * (right_trigger - left_trigger), + self.sa_config["sensor_actuator"]["multiplier"] + * self.filter_xbox_button(buttons, "right_bumper", "left_bumper"), + ] + self.sa_throttle_cmd_pub.publish(throttle_cmd) + + def handle_controls_message(self, msg): + names = ["joint_a", "joint_b", "joint_c", "joint_de_pitch", "joint_de_roll", "allen_key", "gripper"] + if msg["type"] == "sa_arm_values": + names = ["sa_x", "sa_y", "sa_z", "sampler", "sensor_actuator"] + elif msg["type"] == "cache_values": + names = ["cache"] + + if msg["buttons"][self.xbox_mappings["home"]] > 0.5: + rospy.loginfo("Homing DE") + + client = actionlib.SimpleActionClient("arm_action", ArmActionAction) + if client.wait_for_server(timeout=rospy.Duration(1)): + goal = ArmActionGoal(name="de_home") + client.send_goal(goal) + + client.wait_for_result() + else: + rospy.logwarn("Arm action server not available") + else: + if msg["arm_mode"] == "ik": + self.publish_ik(msg["axes"], msg["buttons"]) + + elif msg["arm_mode"] == "position": + self.publish_position(type=msg["type"], names=names, positions=msg["positions"]) + + elif msg["arm_mode"] == "velocity": + self.publish_velocity(type=msg["type"], names=names, axes=msg["axes"], buttons=msg["buttons"]) + + elif msg["arm_mode"] == "throttle": + self.publish_throttle(type=msg["type"], names=names, axes=msg["axes"], buttons=msg["buttons"]) def handle_joystick_message(self, msg): # Tiny deadzone so we can safely e-stop with dampen switch @@ -446,6 +436,8 @@ def get_axes_input( # angular_from_lateral = get_axes_input("left_right", 0.4, True) angular = get_axes_input("twist", 0.03, True, self.max_angular_speed * 
dampen) + linear += get_axes_input("tilt", 0.5, scale=0.1) + self.twist_pub.publish( Twist( linear=Vector3(x=linear), @@ -483,6 +475,11 @@ def arm_controller_callback(self, msg): ) ) + def sa_joint_callback(self, msg): + names = msg.name + z = msg.position[names.index("sa_z")] + self.send(text_data=json.dumps({"type": "sa_z", "sa_z": z})) + def drive_controller_callback(self, msg): hits = [] for n in msg.limit_hit: @@ -657,12 +654,12 @@ def ish_thermistor_data_callback(self, msg): temps = [x.temperature for x in msg.temps] self.send(text_data=json.dumps({"type": "thermistor", "temps": temps})) - def auton_bearing(self): + def bearing(self): base_link_in_map = SE3.from_tf_tree(self.tf_buffer, "map", "base_link") self.send( text_data=json.dumps( { - "type": "auton_tfclient", + "type": "bearing", "rotation": base_link_in_map.rotation.quaternion.tolist(), } ) @@ -814,6 +811,11 @@ def flight_attitude_listener(self): rate.sleep() + def arm_joint_callback(self, msg: JointState) -> None: + # Set non-finite values to zero + msg.position = [x if isfinite(x) else 0 for x in msg.position] + self.send(text_data=json.dumps({"type": "fk", "positions": msg.position})) + def science_spectral_callback(self, msg): self.send( text_data=json.dumps({"type": "spectral_data", "site": msg.site, "data": msg.data, "error": msg.error}) diff --git a/src/teleoperation/frontend/index.html b/src/teleoperation/frontend/index.html index a88854489..e3f02c525 100644 --- a/src/teleoperation/frontend/index.html +++ b/src/teleoperation/frontend/index.html @@ -4,7 +4,7 @@ - Vite App + MRover
diff --git a/src/teleoperation/frontend/package.json b/src/teleoperation/frontend/package.json index 8638e8b32..76d04eba6 100644 --- a/src/teleoperation/frontend/package.json +++ b/src/teleoperation/frontend/package.json @@ -20,6 +20,7 @@ "html2canvas": "^1.4.1", "leaflet": "^1.9.4", "quaternion-to-euler": "^0.5.0", + "three": "^0.163.0", "vue": "^3.3.4", "vue-flight-indicators": "^0.1.1", "vue-router": "^4.2.4", diff --git a/src/teleoperation/frontend/public/meshes/arm_a.fbx b/src/teleoperation/frontend/public/meshes/arm_a.fbx new file mode 100644 index 000000000..8f7c71b41 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/arm_a.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d72481ca801642e73fea847317225514516d10667d777c0287021a90c77befba +size 869948 diff --git a/src/teleoperation/frontend/public/meshes/arm_b.fbx b/src/teleoperation/frontend/public/meshes/arm_b.fbx new file mode 100644 index 000000000..e9360724a --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/arm_b.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0578a7fe0557e7bc86e3949cc748275386e5a472d564367a00f722f0060d47c1 +size 1127196 diff --git a/src/teleoperation/frontend/public/meshes/arm_c.fbx b/src/teleoperation/frontend/public/meshes/arm_c.fbx new file mode 100644 index 000000000..5fbded9a7 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/arm_c.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3bad4fb02574c6faf02cac83665d7a48972640301d0ff3089d1a7200f9b81ff +size 883884 diff --git a/src/teleoperation/frontend/public/meshes/arm_d.fbx b/src/teleoperation/frontend/public/meshes/arm_d.fbx new file mode 100644 index 000000000..be192a925 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/arm_d.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d270eeab807b552603478d239cbe1d5dc0cc374c11cfd8aff8d49b78340df9d3 +size 316844 diff --git 
a/src/teleoperation/frontend/public/meshes/arm_e.fbx b/src/teleoperation/frontend/public/meshes/arm_e.fbx new file mode 100644 index 000000000..5630b17d9 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/arm_e.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b42cb29f991ff0dd2752e613388cef21e9fa5030b50bb023d70f8f9f891a0bea +size 560812 diff --git a/src/teleoperation/frontend/public/meshes/bottle.fbx b/src/teleoperation/frontend/public/meshes/bottle.fbx new file mode 100644 index 000000000..25d7a8eed --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/bottle.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df9dfe054c07005d820d7c81906136bd1bf621ebf4e4dcf3fbfb52f7e0fb4716 +size 17596 diff --git a/src/teleoperation/frontend/public/meshes/ground.fbx b/src/teleoperation/frontend/public/meshes/ground.fbx new file mode 100644 index 000000000..05b93f260 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/ground.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8323c262d0e44f0f1d128a3c68752ef95b70a9b4602de2ff06f43cef106d4b31 +size 43772 diff --git a/src/teleoperation/frontend/public/meshes/hammer.fbx b/src/teleoperation/frontend/public/meshes/hammer.fbx new file mode 100644 index 000000000..9f8b6b9e4 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/hammer.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b5e36777f78ac597bcd49076a0cf2b674d886c315b50b5d1ff75c7bcea3123a +size 16620 diff --git a/src/teleoperation/frontend/public/meshes/primitives/cube.fbx b/src/teleoperation/frontend/public/meshes/primitives/cube.fbx new file mode 100644 index 000000000..9326aa8ca --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/primitives/cube.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fc4c443d8ae8ca4cca8d98e6aa37a811db6fc23ce4eb169abc3314625a5f27a +size 11756 diff --git 
a/src/teleoperation/frontend/public/meshes/primitives/cylinder.fbx b/src/teleoperation/frontend/public/meshes/primitives/cylinder.fbx new file mode 100644 index 000000000..02941d306 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/primitives/cylinder.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4678acc34ec93779fa178d326f321b8cc67976c7e8df754c88fea29b34ee0239 +size 12460 diff --git a/src/teleoperation/frontend/public/meshes/primitives/sphere.fbx b/src/teleoperation/frontend/public/meshes/primitives/sphere.fbx new file mode 100644 index 000000000..07b9cab65 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/primitives/sphere.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5eb2df5136ad610d75ba102a17a9bee8fbb0cb253a45cc6e8bad1c527559c25 +size 13948 diff --git a/src/teleoperation/frontend/public/meshes/rock.fbx b/src/teleoperation/frontend/public/meshes/rock.fbx new file mode 100644 index 000000000..0bd101bbe --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/rock.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8098184699873dfc775269d78e3a2d069344d89b69fe67d38d06a3dba6f92c4d +size 18940 diff --git a/src/teleoperation/frontend/public/meshes/rover_chassis.fbx b/src/teleoperation/frontend/public/meshes/rover_chassis.fbx new file mode 100644 index 000000000..9e4fe6a3d --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/rover_chassis.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:497810bf42e67701d823becb224787bd04121c788a59303bfd6d485d1268fa01 +size 1966956 diff --git a/src/teleoperation/frontend/public/meshes/rover_left_bogie.fbx b/src/teleoperation/frontend/public/meshes/rover_left_bogie.fbx new file mode 100644 index 000000000..4d3a9e550 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/rover_left_bogie.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ad295bd7c739fd5b8e4d05f02db3494db37253b4315c59b60e06942fb93e3a8f +size 1569308 diff --git a/src/teleoperation/frontend/public/meshes/rover_left_rocker.fbx b/src/teleoperation/frontend/public/meshes/rover_left_rocker.fbx new file mode 100644 index 000000000..dcfad7ae4 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/rover_left_rocker.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70a8a1a16c7223473ea44751b1a42d2f5e2cbfe41e312cdf1172ccfa0f8c8bcc +size 1062556 diff --git a/src/teleoperation/frontend/public/meshes/rover_left_wheel.fbx b/src/teleoperation/frontend/public/meshes/rover_left_wheel.fbx new file mode 100644 index 000000000..ab6089917 --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/rover_left_wheel.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13129b734d971654de759255f9a6e65c2571bab60ad668a2c325f1a92b377612 +size 556396 diff --git a/src/teleoperation/frontend/public/meshes/rover_right_bogie.fbx b/src/teleoperation/frontend/public/meshes/rover_right_bogie.fbx new file mode 100644 index 000000000..93b721e5a --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/rover_right_bogie.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:607783822c09899857fe0e556d6f8241130f3fbae96d12356ba4beb8f79bad06 +size 1567468 diff --git a/src/teleoperation/frontend/public/meshes/rover_right_rocker.fbx b/src/teleoperation/frontend/public/meshes/rover_right_rocker.fbx new file mode 100644 index 000000000..73f8b8f0e --- /dev/null +++ b/src/teleoperation/frontend/public/meshes/rover_right_rocker.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f90f0f99210eea2963f2ac4a257ff8f908f6906b82dc4bd57fa61f590786da24 +size 1062780 diff --git a/src/teleoperation/frontend/public/meshes/rover_right_wheel.fbx b/src/teleoperation/frontend/public/meshes/rover_right_wheel.fbx new file mode 100644 index 000000000..bb5022926 --- /dev/null +++ 
b/src/teleoperation/frontend/public/meshes/rover_right_wheel.fbx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:201aaec237d10542c6a97ee8afd7b7bca7c01b6463952872e22a3369b31fa3cb +size 550204 diff --git a/src/teleoperation/frontend/src/components/ArmControls.vue b/src/teleoperation/frontend/src/components/ArmControls.vue index 5a0bf132f..f9520a8b4 100644 --- a/src/teleoperation/frontend/src/components/ArmControls.vue +++ b/src/teleoperation/frontend/src/components/ArmControls.vue @@ -86,6 +86,7 @@ ]" /> + @@ -96,6 +97,7 @@ import ToggleButton from './ToggleButton.vue' import CalibrationCheckbox from './CalibrationCheckbox.vue' import MotorAdjust from './MotorAdjust.vue' import LimitSwitch from './LimitSwitch.vue' +import Rover3D from './Rover3D.vue' // In seconds const updateRate = 0.05 @@ -106,7 +108,8 @@ export default defineComponent({ ToggleButton, CalibrationCheckbox, MotorAdjust, - LimitSwitch + LimitSwitch, + Rover3D }, data() { return { @@ -137,6 +140,7 @@ export default defineComponent({ } }, positions: [], + send_positions: false // Only send after submit is clicked for the first time } }, @@ -152,11 +156,17 @@ export default defineComponent({ alert('Toggling Arm Laser failed.') } } + }, + arm_mode(newMode) { + if (newMode !== 'position') { + this.positions = [] + this.send_positions = false + } } }, - mounted: function() { - document.addEventListener('keydown', this.keyDown); + mounted: function () { + document.addEventListener('keydown', this.keyDown) }, beforeUnmount: function () { @@ -166,22 +176,26 @@ export default defineComponent({ created: function () { interval = window.setInterval(() => { - const gamepads = navigator.getGamepads() - for (let i = 0; i < 4; i++) { - const gamepad = gamepads[i] - if (gamepad) { - // Microsoft and Xbox for old Xbox 360 controllers - // X-Box for new PowerA Xbox One controllers - if ( - gamepad.id.includes('Microsoft') || - gamepad.id.includes('Xbox') || - gamepad.id.includes('X-Box') - ) { - 
let buttons = gamepad.buttons.map((button) => { - return button.value - }) + if (this.send_positions) { + this.publishJoystickMessage([], [], this.arm_mode, this.positions) + } else if (this.arm_mode !== "position") { + const gamepads = navigator.getGamepads() + for (let i = 0; i < 4; i++) { + const gamepad = gamepads[i] + if (gamepad) { + // Microsoft and Xbox for old Xbox 360 controllers + // X-Box for new PowerA Xbox One controllers + if ( + gamepad.id.includes('Microsoft') || + gamepad.id.includes('Xbox') || + gamepad.id.includes('X-Box') + ) { + let buttons = gamepad.buttons.map((button) => { + return button.value + }) - this.publishJoystickMessage(gamepad.axes, buttons, this.arm_mode, this.positions) + this.publishJoystickMessage(gamepad.axes, buttons, this.arm_mode, []) + } } } } @@ -221,13 +235,15 @@ export default defineComponent({ this.positions = Object.values(this.temp_positions).map( (obj) => (Number(obj.value) * Math.PI) / 180 ) + this.send_positions = true + this.publishJoystickMessage([], [], this.arm_mode, this.positions) }, - keyDown: function(event: { key: string }) { + keyDown: function (event: { key: string }) { if (event.key == ' ') { - this.arm_mode = 'arm_disabled'; + this.arm_mode = 'arm_disabled' } - }, + } } }) diff --git a/src/teleoperation/frontend/src/components/AutonTask.vue b/src/teleoperation/frontend/src/components/AutonTask.vue index 35018caa2..bf06605f8 100644 --- a/src/teleoperation/frontend/src/components/AutonTask.vue +++ b/src/teleoperation/frontend/src/components/AutonTask.vue @@ -20,11 +20,11 @@

Joystick Values

-
- -
+
+ +
@@ -156,7 +156,7 @@ export default defineComponent({ this.odom.latitude_deg = msg.latitude this.odom.longitude_deg = msg.longitude this.odom.altitude = msg.altitude - } else if (msg.type == 'auton_tfclient') { + } else if (msg.type == 'bearing') { this.odom.bearing_deg = quaternionToMapAngle(msg.rotation) } else if (msg.type == "center_map") { this.odom.latitude_deg = msg.latitude @@ -179,7 +179,7 @@ export default defineComponent({ this.sendMessage({ "type": "center_map" }); }, 250) interval = setInterval(() => { - this.sendMessage({ type: 'auton_tfclient' }) + this.sendMessage({ type: 'bearing' }) }, 1000) }, @@ -190,15 +190,15 @@ export default defineComponent({ .wrapper { display: grid; grid-gap: 10px; - grid-template-columns: 60% 40%; + grid-template-columns: auto 30% 30%; grid-template-rows: repeat(6, auto); grid-template-areas: - 'header header' - 'map waypoints' - 'data waypoints' - 'data conditions' - 'moteus moteus' - 'cameras cameras'; + 'header header header' + 'feed map waypoints' + 'data data waypoints' + 'data data conditions' + 'moteus moteus moteus' + 'cameras cameras cameras'; font-family: sans-serif; height: auto; @@ -308,4 +308,8 @@ h2 { .data { grid-area: data; } + +.feed { + grid-area: feed; +} diff --git a/src/teleoperation/frontend/src/components/CameraFeed.vue b/src/teleoperation/frontend/src/components/CameraFeed.vue index efbce30af..8b96eefb8 100644 --- a/src/teleoperation/frontend/src/components/CameraFeed.vue +++ b/src/teleoperation/frontend/src/components/CameraFeed.vue @@ -4,26 +4,26 @@

{{ name }} • ID: {{ id }}

- - - - -
- + > + + +
+ diff --git a/src/teleoperation/frontend/src/components/Rover3D.vue b/src/teleoperation/frontend/src/components/Rover3D.vue new file mode 100644 index 000000000..3c53d78ea --- /dev/null +++ b/src/teleoperation/frontend/src/components/Rover3D.vue @@ -0,0 +1,57 @@ + + + + + \ No newline at end of file diff --git a/src/teleoperation/frontend/src/components/SAArmControls.vue b/src/teleoperation/frontend/src/components/SAArmControls.vue index d624b8036..5047bd2db 100644 --- a/src/teleoperation/frontend/src/components/SAArmControls.vue +++ b/src/teleoperation/frontend/src/components/SAArmControls.vue @@ -45,6 +45,8 @@
+ +

SA Z Position: {{ z_position }} inches

{ + return button.value + }) + + this.publishJoystickMessage(gamepad.axes, buttons, this.arm_mode, []) + } } } } @@ -146,6 +155,15 @@ export default defineComponent({ alert('Toggling Arm Laser failed.') } } + else if (msg.type == 'sa_z') { + this.z_position = msg.sa_z * metersToInches + } + }, + arm_mode(newMode) { + if (newMode !== 'position') { + this.positions = [] + this.send_positions = false + } } }, @@ -178,6 +196,12 @@ export default defineComponent({ this.positions = Object.values(this.temp_positions).map( (obj) => (Number(obj.value) * Math.PI) / 180 ) + this.send_positions = true + this.publishJoystickMessage([], [], this.arm_mode, this.positions) + }, + + zero: function() { + this.sendMessage({type: "arm_adjust", name: "sa_z", value: 0}) } } }) diff --git a/src/teleoperation/frontend/src/components/SATask.vue b/src/teleoperation/frontend/src/components/SATask.vue index 45af1e6fa..4382b7c56 100644 --- a/src/teleoperation/frontend/src/components/SATask.vue +++ b/src/teleoperation/frontend/src/components/SATask.vue @@ -78,13 +78,6 @@ :topic_name="'sa_calibrate_sensor_actuator'" />
-
@@ -113,7 +106,9 @@ import MotorAdjust from './MotorAdjust.vue' import OdometryReading from './OdometryReading.vue' import SAArmControls from './SAArmControls.vue' import { disableAutonLED, quaternionToMapAngle } from '../utils.js' -import { mapState } from 'vuex' +import { mapState, mapActions } from 'vuex' + +let interval: number export default { components: { @@ -182,10 +177,26 @@ export default { this.moteusState.error = msg.error this.moteusState.limit_hit = msg.limit_hit } + else if (msg.type == 'nav_sat_fix') { + this.odom.latitude_deg = msg.latitude + this.odom.longitude_deg = msg.longitude + this.odom.altitude = msg.altitude + } + else if (msg.type == 'bearing') { + this.odom.bearing_deg = quaternionToMapAngle(msg.rotation) + } } }, - created: function () {} + methods: { + ...mapActions('websocket', ['sendMessage']), + }, + + created: function () { + interval = setInterval(() => { + this.sendMessage({ type: 'bearing' }) + }, 1000) + } } diff --git a/src/teleoperation/frontend/src/router/index.ts b/src/teleoperation/frontend/src/router/index.ts index f34e022f1..c8681c3d0 100644 --- a/src/teleoperation/frontend/src/router/index.ts +++ b/src/teleoperation/frontend/src/router/index.ts @@ -4,6 +4,7 @@ import DMESTask from '../components/DMESTask.vue' import AutonTask from '../components/AutonTask.vue' import ISHTask from '../components/ISHTask.vue' import SATask from '../components/SATask.vue' +import Rover3D from '../components/Rover3D.vue' const routes = [ { @@ -41,7 +42,13 @@ const routes = [ path: '/ISHTask', name: 'ISHTask', component: ISHTask - } + }, + { + path: "/Control", + name: "Control", + component: Rover3D, + }, + ] const router = createRouter({ history: createWebHistory(), diff --git a/src/teleoperation/frontend/src/rover_three.js b/src/teleoperation/frontend/src/rover_three.js new file mode 100644 index 000000000..ac83a7d02 --- /dev/null +++ b/src/teleoperation/frontend/src/rover_three.js @@ -0,0 +1,162 @@ +import * as THREE from 'three'; 
+import { FBXLoader } from 'three/examples/jsm/loaders/FBXLoader' +import { TrackballControls } from 'three/addons/controls/TrackballControls.js'; + +export function threeSetup(containerId) { + // Get the container where the scene should be placed + const container = document.getElementById(containerId); + + const canvas = { + width: container.clientWidth, + height: container.clientHeight + } + + const scene = new THREE.Scene(); + const camera = new THREE.PerspectiveCamera(75, canvas.width / canvas.height, 0.1, 1000); + camera.up.set(0, 0, 1); + + const directionalLight = new THREE.DirectionalLight(0xffffff, 0.5); + directionalLight.position.set(1, 2, 3); + scene.add(directionalLight); + + const renderer = new THREE.WebGLRenderer(); + renderer.setSize(canvas.width, canvas.height); + container.appendChild(renderer.domElement); // append it to your specific HTML element + + scene.background = new THREE.Color(0xcccccc); + + //controls to orbit around model + var controls = new TrackballControls(camera, renderer.domElement); + controls.rotateSpeed = 1.0; + controls.zoomSpeed = 1.2; + controls.panSpeed = 0.8; + controls.noZoom = false; + controls.noPan = false; + controls.staticMoving = true; + controls.dynamicDampingFactor = 0.3; + + const joints = [ + { + name: "chassis", + file: "rover_chassis.fbx", + translation: [0.164882, -0.200235, 0.051497], + rotation: [0, 0, 0], + }, + { + name: "a", + file: "arm_a.fbx", + translation: [0.034346, 0, 0.049024], + rotation: [0, 0, 0], + }, + { + name: "b", + file: "arm_b.fbx", + translation: [0.534365, 0, 0.009056], + rotation: [0, 0, 0], + }, + { + name: "c", + file: "arm_c.fbx", + translation: [0.546033, 0, 0.088594], + rotation: [0, 0, 0], + }, + { + name: "d", + file: "arm_d.fbx", + translation: [0.044886, 0, 0], + rotation: [0, 0, 0], + }, + { + name: "e", + file: "arm_e.fbx", + translation: [0.044886, 0, 0], + rotation: [0, 0, 0], + } + ] + + const loader = new FBXLoader(); + for (let i = 0; i < joints.length; ++i) { + 
loader.load('/meshes/' + joints[i].file, (fbx) => { + // Traverse the loaded scene to find meshes or objects + fbx.traverse((child)=> { + if (child.isMesh) { + if(joints[i].name == "d" || joints[i].name == "e") { + child.material = new THREE.MeshStandardMaterial({color: 0x00ff00}); + } + else child.material = new THREE.MeshStandardMaterial({color: 0xffffff}); + scene.add(child); + child.scale.set(1, 1, 1); + child.position.set(0, 0, 0); + } + }); + }, + (xhr) => { + console.log((xhr.loaded / xhr.total) * 100 + '% loaded') + }, + (error) => { + console.log(error) + }); + } + + camera.position.x = 2; + camera.position.y = 2; + camera.position.z = 2; + camera.lookAt(new THREE.Vector3(0, 0, 0)); + + const targetCube = new THREE.Mesh( + new THREE.BoxGeometry( 0.1, 0.1, 0.1 ), + new THREE.MeshBasicMaterial({color: 0x00ff00}) + ); + scene.add( targetCube ); + + function animate() { + requestAnimationFrame(animate); + controls.update(); + renderer.render(scene, camera); + } + + animate(); + + function fk(positions) { + + let cumulativeMatrix = new THREE.Matrix4(); + + cumulativeMatrix.makeTranslation(new THREE.Vector3(0, 0, 0.439675)); //makes meshes relative to base_link + + for(let i = 0; i < joints.length; ++i){ + let mesh = scene.getObjectByName(joints[i].name); + + let localMatrix = new THREE.Matrix4(); + + let rotationAngle = positions[i]; // Angle for this joint + if(joints[i].name == "chassis") { + localMatrix.makeTranslation(0,rotationAngle,0); + } + else { + localMatrix.makeRotationY(rotationAngle); + } + + let offset = new THREE.Vector3().fromArray(joints[i].translation); + + localMatrix.setPosition( + new THREE.Vector3().setFromMatrixPosition(localMatrix).add(offset) + ); + + mesh.matrixAutoUpdate = false; + mesh.matrix = cumulativeMatrix.clone(); + + cumulativeMatrix.multiply(localMatrix); + } + } + + function ik(target) { + let quaternion = new THREE.Quaternion(...target.quaternion); + targetCube.position.set(...target.position); + 
targetCube.setRotationFromQuaternion(quaternion); + } + + // Return an object with the updateJointAngles() function + return { + fk, ik + } +}; diff --git a/urdf/rover/rover.urdf.xacro b/urdf/rover/rover.urdf.xacro index 3aedbbc29..f79b800bb 100644 --- a/urdf/rover/rover.urdf.xacro +++ b/urdf/rover/rover.urdf.xacro @@ -204,7 +204,7 @@ - + From 81c3de8e4a0760f15585ad3259971861e60ab951 Mon Sep 17 00:00:00 2001 From: qhdwight Date: Sat, 27 Apr 2024 20:58:02 -0400 Subject: [PATCH 126/126] Format --- src/localization/gps_linearization.py | 1 + src/navigation/context.py | 6 ++++-- src/navigation/long_range.py | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/localization/gps_linearization.py b/src/localization/gps_linearization.py index 84aac80a4..390037622 100755 --- a/src/localization/gps_linearization.py +++ b/src/localization/gps_linearization.py @@ -16,6 +16,7 @@ QUEUE_SIZE = 10 SLOP = 0.5 + class GPSLinearization: """ This node subscribes to GPS and IMU data, linearizes the GPS data diff --git a/src/navigation/context.py b/src/navigation/context.py index 68ca8ab20..3face5797 100644 --- a/src/navigation/context.py +++ b/src/navigation/context.py @@ -168,7 +168,7 @@ def push_frame(self, tags: List[LongRangeTag]) -> None: time_difference = rospy.get_time() - cur_tag.time if time_difference > LONG_RANGE_TAG_EXPIRATION_TIME_SECONDS: del self.__data[cur_tag.tag.id] - # if we do see one of our tags in the new message, increment its hit count + # if we do see one of our tags in the new message, increment its hit count else: cur_tag.hit_count += INCREMENT_WEIGHT if cur_tag.hit_count > self.max_hits: @@ -195,7 +195,8 @@ def get(self, tag_id: int) -> Optional[LongRangeTag]: time_difference = rospy.get_time() - self.__data[tag_id].time if ( # self.__data[tag_id].hit_count >= self.min_hits and - time_difference <= LONG_RANGE_TAG_EXPIRATION_TIME_SECONDS + time_difference + <= LONG_RANGE_TAG_EXPIRATION_TIME_SECONDS ): return self.__data[tag_id].tag else: @@ 
-271,6 +272,7 @@ def get_approach_target_state(self) -> Optional[State]: if self.ctx.env.current_target_pos() is not None: return approach_post.ApproachPostState() # if we see the tag in the long range camera, go to LongRangeState + assert current_waypoint is not None if self.ctx.env.long_range_tags.get(current_waypoint.tag_id) is not None: return long_range.LongRangeState() elif self.look_for_object(): diff --git a/src/navigation/long_range.py b/src/navigation/long_range.py index 1924c5284..669acde75 100644 --- a/src/navigation/long_range.py +++ b/src/navigation/long_range.py @@ -42,7 +42,7 @@ def get_target_pos(self, context) -> Optional[np.ndarray]: rover_direction = pose.rotation.direction_vector() bearing_to_tag = np.radians(tag.bearing) - # If you have not seen the tag in a while but are waiting until the expiration time is up, + # If you have not seen the tag in a while but are waiting until the expiration time is up, # keep going towards where it was last seen (the direction you are heading), don't use an old bearing value if context.env.long_range_tags._LongRangeTagStore__data[tag_id].hit_count <= 0: bearing_to_tag = 0