diff --git a/CMakeLists.txt b/CMakeLists.txt index 587d063..1c5783a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,13 +20,18 @@ find_package(std_msgs REQUIRED) find_package(OpenCV 4.5.4 REQUIRED) find_package(PCL REQUIRED) find_package(pcl_conversions REQUIRED) +find_package(tf2_geometry_msgs REQUIRED) include_directories(include) set(LIB_NAME "${PROJECT_NAME}_component") add_library(${LIB_NAME} SHARED - src/valve_detection.cpp + src/valve_detector.cpp + src/depth_image_processing.cpp + src/pointcloud_processing.cpp + src/angle_detector.cpp + src/valve_pose_ros.cpp ) target_link_libraries(${LIB_NAME} PUBLIC @@ -49,13 +54,15 @@ ament_target_dependencies(${LIB_NAME} PUBLIC vision_msgs geometry_msgs std_msgs - PCL pcl_conversions + tf2 + tf2_ros + tf2_geometry_msgs ) rclcpp_components_register_node( ${LIB_NAME} - PLUGIN "ValveDetectionNode" + PLUGIN "valve_detection::ValvePoseNode" EXECUTABLE ${PROJECT_NAME}_node ) diff --git a/README.md b/README.md index 1cb4209..7b0612c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,99 @@ -# valve-detection +# Valve Detection [![Industrial CI](https://github.com/vortexntnu/valve-detection/actions/workflows/industrial-ci.yml/badge.svg)](https://github.com/vortexntnu/valve-detection/actions/workflows/industrial-ci.yml) [![pre-commit.ci status](https://results.pre-commit.ci/badge/github/vortexntnu/valve-detection/main.svg)](https://results.pre-commit.ci/latest/github/vortexntnu/valve-detection/main) [![codecov](https://codecov.io/github/vortexntnu/valve-detection/graph/badge.svg?token=6csTwaozlh)](https://codecov.io/github/vortexntnu/valve-detection) + +--- + +### Overview + +The **Valve Detection Node** estimates the **3D pose and orientation** of industrial valves from: + +* A **depth image** or **point cloud**, +* An optional **color image**, and +* **2D detections** (e.g., YOLO bounding boxes). 
+ +It fits a **plane** to the valve annulus, computes the **3D intersection** of the valve center ray with that plane, and estimates the **rotation** of the valve handle using image-based line detection. + +It can output: + +* A `geometry_msgs::msg::PoseArray` of all valve poses, +* Debug point clouds (annulus points and segmented planes), +* Annotated color images showing detections, plane fits, and axes, +* Optional angle detection debug images. + +--- + + + + + +## Launching the Node + + +```bash +ros2 launch valve_detection valve_detection.launch.py +``` + +## How it works + +### 1. Input Synchronization + +The node synchronizes: + +* **Depth (or point cloud)** +* **Color image** (optional) +* **2D detections** + +It supports multiple input modes via `use_depth_image` and `use_color_image`. + +### 2. Annulus Extraction + +For each bounding box, a ring-shaped region (annulus) around the center is extracted from the depth image or point cloud. + +### 3. Plane Segmentation + +RANSAC is used to fit a plane through the annulus points. + +### 4. Ray–Plane Intersection + +The center ray from the camera through the bounding box center is intersected with the plane to find the valve’s 3D position. + +### 5. Orientation Estimation + +* The **plane normal** defines one orientation axis. +* The **valve handle angle** (from image) defines the in-plane rotation. +* Intrinsics are used to back-project that angle into 3D. +* The result is a full 3×3 rotation matrix and quaternion. + +### 6. Pose Publishing + +A `PoseArray` message is published with one `Pose` per detected valve. If orientation computation failed no pose is published. 
+
+---
+
+## Visualization Outputs
+
+| Topic | Description |
+| ------------------------ | ------------------------------------------- |
+| `/valve_detection_image` | Color image with bounding boxes and 3D axes |
+| `/valve_angle_image` | Debug overlay showing detected Hough lines |
+| `/bbx_annulus_pcl` | Ring-shaped depth points used for plane fit |
+| `/annulus_plane_pcl` | Segmented plane points |
+
+---
+
+
+## Common Issues
+
+| Issue | Possible Cause |
+|----------------------------|-----------------------------------------------------------------------------|
+| No poses published | Missing plane segmentation (adjust `plane_ransac_threshold` or annulus size) |
+| Angle NaN / no handle lines| Tune Hough and Canny thresholds |
+
+
+---
+
+## Future work
+* Use actual endpoints of line for backprojection to retrieve the perspective-correct plane angle. Now we just use rotation around optical axis.
+TODO: For OBB, estimate line segment from BB size.
diff --git a/config/valve_detection_params.yaml b/config/valve_detection_params.yaml index 717572d..f59c6ad 100644 --- a/config/valve_detection_params.yaml +++ b/config/valve_detection_params.yaml @@ -1,5 +1,40 @@ /**: ros__parameters: depth_image_sub_topic: "/zed_node/depth/depth_registered" - color_image_sub_topic: "/image_rect" + color_image_sub_topic: "/zed_node/left/image_rect_color" + pcl_sub_topic: "/zed_node/depth/point_cloud" detections_sub_topic: "/detections_output" + + color_image_info_topic: "/zed_node/depth/camera_info" + + valve_poses_pub_topic: "/valve_poses" + + yolo_img_width: 640 + yolo_img_height: 640 + + annulus_radius_ratio: 0.8 + + plane_ransac_threshold: 0.01 + plane_ransac_max_iterations: 20 + + valve_handle_offset: 0.05 + + use_depth_image: true # if false, use pcl and tf2 to transform pointcloud to color frame + + visualize_detections: true + processed_image_pub_topic: "/valve_detection_image" + debug_visualize: true + annulus_pub_topic: "/bbx_annulus_pcl" + plane_pub_topic: "/annulus_plane_pcl" + angle_detection_image_pub_topic: "/valve_angle_image" + + calculate_angle: true + angle.detection_area_ratio: 0.22 # percentage of bb size + angle.canny_low_threshold: 40 + angle.canny_high_threshold: 80 + angle.canny_aperture_size: 3 # must be odd + angle.hough_rho_res: 1.0 + angle.hough_theta_res: 0.0174533 # 1 degree in radians + angle.hough_threshold: 20 + angle.hough_min_line_length: 45.0 + angle.hough_max_line_gap: 5.0 diff --git a/include/valve_detection/angle_detector.hpp b/include/valve_detection/angle_detector.hpp new file mode 100644 index 0000000..e565ade --- /dev/null +++ b/include/valve_detection/angle_detector.hpp @@ -0,0 +1,72 @@ +#ifndef VALVE_POSE_ANGLE_HPP +#define VALVE_POSE_ANGLE_HPP + +#include +#include +#include +#include "valve_detection/types.hpp" + +namespace valve_detection { + +class AngleDetector { + public: + /** + * @brief Constructs an AngleDetector with a set of parameters. 
+ * @param params The parameters for the detector. + */ + explicit AngleDetector(const AngleDetectorParams& params) + : params_(params) {} + + /** + * @brief Computes the angle for each bounding box in a vector. + * + * This function processes each bounding box to define a region of interest + * (ROI) in the input image, applies Canny edge detection and Hough line + * transform, and then calculates the dominant line angle within each box's + * ROI, and updates the 'theta' member of each box in-place. + * + * @param color_image The input image to be processed. + * @param boxes A vector of BoundingBox objects passed by reference. The + * 'theta' member of each box will be updated. + */ + void compute_angles(const cv::Mat& color_image, + std::vector& boxes) const; + + /** + * @brief Processes a vector of bounding boxes to calculate the angle for + * each, updating the boxes in-place and drawing all visualizations on a + * single image. + * + * @param image_to_draw_on The input/output image. This cv::Mat will be + * modified to include visualizations for all processed boxes. + * @param boxes A vector of BoundingBox objects. The theta member of each + * box will be updated with the calculated angle (or NaN if no line is + * found). + */ + void compute_angles_debug(cv::Mat& image_to_draw_on, + std::vector& boxes) const; + + private: + /** + * @brief Finds the longest line among a set of lines detected in an image. + * @param lines A vector of lines, each represented as cv::Vec4i (x1, y1, + * x2, y2). + * @return The cv::Vec4i representing the longest line. + * If the input vector is empty, returns a zero-length line {0, 0, 0, 0}. + */ + cv::Vec4i find_longest_line(const std::vector& lines) const; + + /** + * @brief Applies Canny edge detection to a grayscale image. + * @param gray_image The input grayscale image. + * @return A binary image (CV_8UC1) with edges detected. 
+ */ + cv::Mat apply_canny_edge_detection(const cv::Mat& gray_image) const; + + /** @brief Parameters for the angle detector. */ + AngleDetectorParams params_; +}; + +} // namespace valve_detection + +#endif // VALVE_POSE_ANGLE_HPP diff --git a/include/valve_detection/depth_image_processing.hpp b/include/valve_detection/depth_image_processing.hpp new file mode 100644 index 0000000..2ddea69 --- /dev/null +++ b/include/valve_detection/depth_image_processing.hpp @@ -0,0 +1,61 @@ +#ifndef VALVE_POSE_DEPTH_HPP +#define VALVE_POSE_DEPTH_HPP + +#include "valve_detection/types.hpp" + +#include +#include +#include +#include +#include +#include +#include + +namespace valve_detection { + +/** + * @brief Projects a 2D pixel with depth into 3D camera coordinates. + * + * @param u The x-coordinate of the pixel in the image. + * @param v The y-coordinate of the pixel in the image. + * @param depth The depth value at the pixel (in meters or the same units as + * intrinsics). + * @param fx Focal length along x-axis. + * @param fy Focal length along y-axis. + * @param cx Principal point x-coordinate. + * @param cy Principal point y-coordinate. + * @param[out] point The resulting 3D point in camera coordinates. + */ +void project_pixel_to_point(int u, + int v, + float depth, + double fx, + double fy, + double cx, + double cy, + pcl::PointXYZ& point); + +/** + * @brief Extracts a 3D point cloud representing the annulus (valve rim) from a + * depth image. + * + * The function selects pixels within the annulus region of a bounding box and + * projects them into 3D points using the camera intrinsics. + * + * @param depth_image The input depth image (CV_32FC1 or similar). + * @param bbox The bounding box around the valve in the image. + * @param image_properties Camera intrinsics and image dimensions. + * @param annulus_radius_ratio Fraction of the bounding box considered as the + * annulus (0.0–1.0). 
+ * @param[out] cloud The resulting point cloud containing the 3D points of the + * annulus. + */ +void extract_annulus_pcl(const cv::Mat& depth_image, + const BoundingBox& bbox, + const ImageProperties& image_properties, + float annulus_radius_ratio, + pcl::PointCloud::Ptr& cloud); + +} // namespace valve_detection + +#endif // VALVE_POSE_DEPTH_HPP diff --git a/include/valve_detection/pointcloud_processing.hpp b/include/valve_detection/pointcloud_processing.hpp new file mode 100644 index 0000000..588496f --- /dev/null +++ b/include/valve_detection/pointcloud_processing.hpp @@ -0,0 +1,39 @@ +#ifndef VALVE_POSE_PCL_HPP +#define VALVE_POSE_PCL_HPP + +#include "valve_detection/types.hpp" + +#include +#include +#include +#include + +namespace valve_detection { + +/** + * @brief Extracts a 3D point cloud representing the annulus (valve rim) from an + * input point cloud. + * + * This function selects points that correspond to the annulus region of a + * bounding box in image space, based on the camera intrinsics and the specified + * radius ratio. + * + * @param input_cloud The input point cloud (typically from a depth camera or + * LIDAR). + * @param bbox The bounding box around the valve in image coordinates. + * @param image_properties Camera intrinsics and image dimensions for + * projection. + * @param annulus_radius_ratio Fraction of the bounding box considered as the + * annulus (0.0–1.0). + * @param[out] cloud The resulting point cloud containing the 3D points of the + * annulus. 
+ */ +void extract_annulus_pcl(const pcl::PointCloud::Ptr& input_cloud, + const BoundingBox& bbox, + const ImageProperties& image_properties, + float annulus_radius_ratio, + pcl::PointCloud::Ptr& cloud); + +} // namespace valve_detection + +#endif // VALVE_POSE_PCL_HPP diff --git a/include/valve_detection/types.hpp b/include/valve_detection/types.hpp new file mode 100644 index 0000000..248452b --- /dev/null +++ b/include/valve_detection/types.hpp @@ -0,0 +1,85 @@ +#ifndef TYPES_HPP +#define TYPES_HPP + +#include + +namespace valve_detection { + +/** + * @struct CameraIntrinsics + * @brief Stores intrinsic parameters of a pinhole camera model. + */ +struct CameraIntrinsics { + double fx; ///< Focal length in pixels along x-axis + double fy; ///< Focal length in pixels along y-axis + double cx; ///< Principal point x-coordinate (in pixels) + double cy; ///< Principal point y-coordinate (in pixels) +}; + +/** + * @struct ImageDimensions + * @brief Represents the width and height of an image. + */ +struct ImageDimensions { + int width; ///< Image width in pixels + int height; ///< Image height in pixels +}; + +/** + * @struct ImageProperties + * @brief Combines camera intrinsics and image dimensions. + */ +struct ImageProperties { + CameraIntrinsics intr; ///< Camera intrinsic parameters + ImageDimensions dim; ///< Image width and height +}; + +/** + * @struct BoundingBox + * @brief Represents a 2D bounding box with center, size, and orientation. + */ +struct BoundingBox { + float center_x; ///< X-coordinate of the bounding box center + float center_y; ///< Y-coordinate of the bounding box center + float size_x; ///< Width of the bounding box + float size_y; ///< Height of the bounding box + float theta; ///< Orientation in radians (positive counter-clockwise) +}; + +/** + * @struct Pose + * @brief Represents a 3D pose with position and orientation. 
+ */ +struct Pose { + Eigen::Vector3f position; ///< 3D position (x, y, z) + Eigen::Quaternionf orientation; ///< Orientation as a quaternion +}; + +/** + * @struct AngleDetectorParams + * @brief Parameters for Canny edge detection and Hough line transform used in + * angle detection. + */ +struct AngleDetectorParams { + float line_detection_area; ///< Fraction of the bounding box to use as ROI + ///< (0.0–1.0) + + // Canny edge detection parameters + int canny_low_threshold; ///< Low threshold for Canny edge detector + int canny_high_threshold; ///< High threshold for Canny edge detector + int canny_aperture_size; ///< Aperture size for the Sobel operator in Canny + + // Hough line transform parameters + double hough_rho_res; ///< Distance resolution in pixels + double hough_theta_res; ///< Angle resolution in radians + int hough_threshold; ///< Minimum number of votes (intersections) to detect + ///< a line + double + hough_min_line_length; ///< Minimum line length to accept (in pixels) + double hough_max_line_gap; ///< Maximum allowed gap between line segments + ///< to treat them as a single line +}; + +} // namespace valve_detection + +#endif // TYPES_HPP diff --git a/include/valve_detection/valve_detection.hpp b/include/valve_detection/valve_detection.hpp deleted file mode 100644 index 5329ce1..0000000 --- a/include/valve_detection/valve_detection.hpp +++ /dev/null @@ -1,126 +0,0 @@ -#ifndef DETECTION_IMAGE_PROCESSOR_HPP -#define DETECTION_IMAGE_PROCESSOR_HPP - -#include -#include -#include -#include -#include -#include -#include - -#include -#include "cv_bridge/cv_bridge.h" -#include "geometry_msgs/msg/pose_stamped.hpp" -#include "geometry_msgs/msg/vector3.hpp" -#include "rclcpp/rclcpp.hpp" -#include "sensor_msgs/msg/camera_info.hpp" -#include "sensor_msgs/msg/image.hpp" -#include "sensor_msgs/msg/point_cloud2.hpp" -#include "std_msgs/msg/header.hpp" -#include "vision_msgs/msg/detection2_d_array.hpp" - -#include -#include -#include -#include -#include 
-#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include - -class ValveDetectionNode : public rclcpp::Node { - public: - explicit ValveDetectionNode(const rclcpp::NodeOptions& options); - ~ValveDetectionNode() {}; - - private: - message_filters::Subscriber depth_image_sub_; - message_filters::Subscriber color_image_sub_; - message_filters::Subscriber - detections_sub_; - - typedef message_filters::sync_policies::ExactTime< - sensor_msgs::msg::Image, - sensor_msgs::msg::Image, - vision_msgs::msg::Detection2DArray> - MySyncPolicy; - - std::shared_ptr> sync_; - - // Subscriptions - rclcpp::Subscription::SharedPtr - camera_info_subscription_; - rclcpp::Subscription::SharedPtr - camera_info_subscription_yolo_color_; - rclcpp::Subscription::SharedPtr - detections_subscription_; - rclcpp::Subscription::SharedPtr - image_subscription_; - - // Publishers - rclcpp::Publisher::SharedPtr processed_image_pub_; - rclcpp::Publisher::SharedPtr plane_normal_pub_; - rclcpp::Publisher::SharedPtr pointcloud_pub_; - rclcpp::Publisher::SharedPtr - line_pose_pub_; - rclcpp::Publisher::SharedPtr - line_points_pub_; - rclcpp::Publisher::SharedPtr - near_plane_cloud_pub_; - - // Stored messages - sensor_msgs::msg::CameraInfo::SharedPtr camera_info_; - bool camera_info_received_ = false; - sensor_msgs::msg::CameraInfo::SharedPtr camera_info_yolo_color_; - bool camera_info_received_yolo_color_ = false; - // vision_msgs::msg::Detection2DArray::SharedPtr latest_detections_; - // sensor_msgs::msg::Image::SharedPtr latest_image_; - - // Mutex for thread safety - std::mutex mutex_; - - // Used for line fitting direction filtering - static Eigen::Vector3f filter_direction_; - - // Scalars for image resizing - double height_scalar_; - double width_scalar_; - - void synchronized_callback( - const sensor_msgs::msg::Image::ConstSharedPtr& depth_image, - const sensor_msgs::msg::Image::ConstSharedPtr& color_image, - const 
vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections); - // Callback functions - void camera_info_callback( - const sensor_msgs::msg::CameraInfo::SharedPtr camera_info_msg); - void camera_info_callback_yolo_colored( - const sensor_msgs::msg::CameraInfo::SharedPtr camera_info_msg); - - // Utility functions - void compute_height_width_scalars(); - // void process_and_publish_image(const - // sensor_msgs::msg::Image::ConstSharedPtr & depth_image, - // const sensor_msgs::msg::Image::ConstSharedPtr - // & color_image, const - // vision_msgs::msg::Detection2DArray::ConstSharedPtr - // & detections); - void process_and_publish_image( - const sensor_msgs::msg::Image::ConstSharedPtr& image, - const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections); - - void project_pixel_to_3d(int u, int v, float depth, pcl::PointXYZ& point); - void project_3d_to_pixel(float x, float y, float z, int& u, int& v); -}; - -#endif // DETECTION_IMAGE_PROCESSOR_HPP diff --git a/include/valve_detection/valve_detector.hpp b/include/valve_detection/valve_detector.hpp new file mode 100644 index 0000000..b60259e --- /dev/null +++ b/include/valve_detection/valve_detector.hpp @@ -0,0 +1,279 @@ +#ifndef VALVE_POSE_BASE_HPP +#define VALVE_POSE_BASE_HPP + +#include +#include +#include +#include +#include +#include +#include +#include "valve_detection/depth_image_processing.hpp" +#include "valve_detection/pointcloud_processing.hpp" +#include "valve_detection/types.hpp" + +namespace valve_detection { + +/** + * @class ValveDetector + * @brief Detects valve poses from color and depth images or point clouds. + * + * This class extracts valve annulus point clouds, segments planes, + * computes ray-plane intersections, estimates valve poses, and can calculate + * the valve rotation angle using the AngleDetector. + */ +class ValveDetector { + public: + /** + * @brief Constructs a ValveDetector with YOLO image dimensions and + * detection parameters. 
+ * @param yolo_img_width Width of the YOLO input image. + * @param yolo_img_height Height of the YOLO input image. + * @param annulus_radius_ratio Ratio of bounding box used to define the + * annulus. + * @param plane_ransac_threshold Distance threshold for plane segmentation. + * @param plane_ransac_max_iterations Max iterations for RANSAC plane + * segmentation. + * @param valve_handle_offset Distance to shift the valve handle along the + * plane normal. + */ + ValveDetector(int yolo_img_width, + int yolo_img_height_, + float annulus_radius_ratio, + float plane_ransac_threshold, + int plane_ransac_max_iterations, + float valve_handle_offset); + + virtual ~ValveDetector() = default; + + /** + * @brief Computes valve poses from depth input and bounding boxes. + * + * This template function supports depth input as either a PCL point cloud + * or an OpenCV depth image. It extracts the annulus points, segments + * planes, computes ray-plane intersections, shifts points along normals, + * and optionally calculates valve rotation angles. + * + * @tparam T Depth input type (cv::Mat or + * pcl::PointCloud::Ptr). + * @param depth_input The input depth data. + * @param color_image Corresponding color image for angle calculation. + * @param boxes Bounding boxes of detected valves. + * @param poses Output vector of computed valve poses. + * @param all_annulus_cloud Optional point cloud for visualizing all annuli. + * @param all_annulus_plane_cloud Optional point cloud for visualizing + * segmented planes. + * @param debug_visualize Whether to populate visualization point clouds. + * @param calculate_angle Whether to compute the valve rotation angle. 
+ */ + template + void Compute_valve_poses( + const T& depth_input, + const std::vector& boxes, + std::vector& poses, + pcl::PointCloud::Ptr& all_annulus_cloud, + pcl::PointCloud::Ptr& all_annulus_plane_cloud, + bool debug_visualize) { + if (debug_visualize) { + all_annulus_cloud->clear(); + all_annulus_plane_cloud->clear(); + } + + for (const auto& box : boxes) { + pcl::PointCloud::Ptr annulus_cloud( + new pcl::PointCloud); + + if constexpr (std::is_same< + T, pcl::PointCloud::Ptr>::value) { + extract_annulus_pcl(depth_input, box, color_image_properties_, + annulus_radius_ratio_, annulus_cloud); + } else if constexpr (std::is_same::value) { + extract_annulus_pcl(depth_input, box, color_image_properties_, + annulus_radius_ratio_, annulus_cloud); + } + + if (annulus_cloud->empty()) + continue; + + if (debug_visualize) { + *all_annulus_cloud += *annulus_cloud; + } + + pcl::ModelCoefficients::Ptr coefficients( + new pcl::ModelCoefficients); + pcl::PointIndices::Ptr inliers(new pcl::PointIndices); + + if (!segment_plane(annulus_cloud, coefficients, inliers)) + continue; + + if (debug_visualize) { + pcl::PointCloud::Ptr annulus_plane_cloud( + new pcl::PointCloud); + pcl::copyPointCloud(*annulus_cloud, inliers->indices, + *annulus_plane_cloud); + *all_annulus_plane_cloud += *annulus_plane_cloud; + } + + Eigen::Vector3f bb_centre_ray = get_ray_direction(box); + Eigen::Vector3f ray_plane_intersection = + find_ray_plane_intersection(coefficients, bb_centre_ray); + if (ray_plane_intersection.isZero()) + continue; + + Eigen::Vector3f plane_normal = + compute_plane_normal(coefficients, bb_centre_ray); + if (plane_normal.isZero()) + continue; + + Eigen::Vector3f shifted_position = + shift_point_along_normal(ray_plane_intersection, plane_normal); + + float angle = box.theta; + + Eigen::Matrix3f rotation_matrix = + create_rotation_matrix(coefficients, plane_normal, angle); + Eigen::Quaternionf rotation_quat; + rmat_to_quat(rotation_matrix, rotation_quat); + + Pose pose; + 
pose.position = shifted_position; + pose.orientation = rotation_quat; + poses.emplace_back(pose); + } + } + + /** + * @brief Projects a 3D point to pixel coordinates using the camera + * intrinsics. + * @param x X coordinate in 3D space. + * @param y Y coordinate in 3D space. + * @param z Z coordinate in 3D space. + * @param[out] u Output pixel x-coordinate. + * @param[out] v Output pixel y-coordinate. + */ + void project_point_to_pixel(float x, + float y, + float z, + int& u, + int& v) const; + + /** + * @brief Calculates the padding and scaling for letterbox resizing. + */ + void calculate_letterbox_padding(); + + /** + * @brief Transforms bounding box coordinates from letterboxed YOLO input to + * original image coordinates. + * @param bbox Bounding box in YOLO image coordinates. + * @return Bounding box transformed to original image coordinates. + */ + BoundingBox transform_bounding_box(const BoundingBox& bbox) const; + + /** + * @brief Sets the color image properties (intrinsics and dimensions). + */ + void set_color_image_properties(const ImageProperties& properties) { + color_image_properties_ = properties; + } + + /** + * @brief Segments the dominant plane from a point cloud using RANSAC. + * @param cloud Input point cloud. + * @param coefficients Output model coefficients (Ax + By + Cz + D = 0). + * @param inliers Output inlier indices of the plane. + * @return True if a plane with inliers was found, false otherwise. + */ + bool segment_plane(const pcl::PointCloud::Ptr& cloud, + pcl::ModelCoefficients::Ptr& coefficients, + pcl::PointIndices::Ptr& inliers) const; + + /** + * @brief Computes the ray direction vector through the bounding box center + * in camera frame. + * @param bbox Bounding box for the valve. + * @return Normalized Eigen::Vector3f representing the ray direction. 
+ */ + Eigen::Vector3f get_ray_direction(const BoundingBox& bbox) const; + + /** + * @brief Computes the plane normal from plane coefficients, ensuring it + * points against the ray. + * @param coefficients Plane coefficients (Ax + By + Cz + D = 0). + * @param ray_direction Ray direction vector from camera. + * @return Normalized Eigen::Vector3f plane normal, or zero if invalid. + */ + Eigen::Vector3f compute_plane_normal( + const pcl::ModelCoefficients::Ptr& coefficients, + const Eigen::Vector3f& ray_direction) const; + + /** + * @brief Finds the intersection point of a ray and a plane. + * @param coefficients Plane coefficients. + * @param ray_direction Normalized ray direction. + * @return Intersection point in 3D space, or zero vector if no + * intersection. + */ + Eigen::Vector3f find_ray_plane_intersection( + const pcl::ModelCoefficients::Ptr& coefficients, + const Eigen::Vector3f& ray_direction) const; + + /** + * @brief Shifts a point along a plane normal by the valve handle offset. + * @param intersection_point Point on the plane. + * @param plane_normal Normalized plane normal vector. + * @return Shifted 3D point representing valve handle position. + */ + Eigen::Vector3f shift_point_along_normal( + const Eigen::Vector3f& intersection_point, + const Eigen::Vector3f& plane_normal) const; + + /** + * @brief Creates a rotation matrix with the plane normal as z-axis and + * valve angle applied. + * @param coefficients Plane coefficients. + * @param plane_normal Normalized plane normal vector. + * @param angle Valve rotation angle in radians. + * @return Rotation matrix (Eigen::Matrix3f) for the valve pose. + */ + Eigen::Matrix3f create_rotation_matrix( + const pcl::ModelCoefficients::Ptr& coefficients, + const Eigen::Vector3f& plane_normal, + float angle); + + /** + * @brief Converts a 3x3 rotation matrix to a normalized quaternion. + * @param rotation_matrix Input rotation matrix. + * @param quat Output quaternion. 
+ */ + void rmat_to_quat(const Eigen::Matrix3f& rotation_matrix, + Eigen::Quaternionf& quat) const; + + /** + * @brief Draws bounding boxes and 3D axes for valve poses on an image. + * @param image Input color image. + * @param boxes Detected valve bounding boxes. + * @param poses Computed valve poses. + * @return Image with drawn detections. + */ + cv::Mat draw_detections(const cv::Mat& image, + const std::vector& boxes, + const std::vector& poses) const; + + protected: + ImageProperties color_image_properties_; + int yolo_img_width_; + int yolo_img_height_; + float annulus_radius_ratio_; + float plane_ransac_threshold_; + int plane_ransac_max_iterations_; + float valve_handle_offset_; + float letterbox_scale_factor_; + int letterbox_pad_x_; + int letterbox_pad_y_; + Eigen::Vector3f filter_direction_ = Eigen::Vector3f(1, 0, 0); +}; + +} // namespace valve_detection + +#endif // VALVE_POSE_BASE_HPP diff --git a/include/valve_detection_ros/valve_pose_ros.hpp b/include/valve_detection_ros/valve_pose_ros.hpp new file mode 100644 index 0000000..26dee16 --- /dev/null +++ b/include/valve_detection_ros/valve_pose_ros.hpp @@ -0,0 +1,330 @@ +#ifndef VALVE_POSE_DEPTH_ROS_HPP +#define VALVE_POSE_DEPTH_ROS_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include "valve_detection/angle_detector.hpp" +#include "valve_detection/valve_detector.hpp" + +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace valve_detection { + +/** + * @class ValvePoseNode + * @brief ROS 2 node for computing valve poses from depth images or point clouds + * and 2D detections. + * + * This node subscribes to depth images or point clouds, color images + * (optional), and 2D detection messages. It synchronizes the inputs, computes + * valve poses using ValveDetector, and publishes results as PoseArray and + * optionally as visualized point clouds or images. 
+ */ +class ValvePoseNode : public rclcpp::Node { + public: + /** + * @brief Constructs the ValvePoseNode with ROS node options. + * @param options Node options. + */ + explicit ValvePoseNode(const rclcpp::NodeOptions& options); + + virtual ~ValvePoseNode() = default; + + private: + /** + * @brief Callback for camera info subscription to obtain intrinsics and + * image size. + * @param camera_info_msg Shared pointer to the CameraInfo message. + */ + void color_image_info_callback( + const sensor_msgs::msg::CameraInfo::SharedPtr camera_info_msg); + + /** + * @brief Initializes the ValveDetector with the specified ros parameters. + */ + void init_valve_detector(); + + /** + * @brief Initializes the angle detector inside the ValveDetector if angle + * calculation is enabled. + */ + void init_angle_detector(); + + /** + * @brief Converts a vision_msgs::BoundingBox2D to local BoundingBox type. + * @param bbox The input 2D bounding box message. + * @return Converted BoundingBox. + */ + BoundingBox to_bounding_box( + const vision_msgs::msg::BoundingBox2D& bbox) const; + + /** + * @brief Publishes a point cloud containing annulus points. + * @param cloud The point cloud to publish. + * @param header ROS message header to use for publication. + */ + void publish_annulus_pcl(const pcl::PointCloud::Ptr& cloud, + const std_msgs::msg::Header& header) const; + + /** + * @brief Publishes a point cloud containing segmented plane points. + * @param cloud The point cloud to publish. + * @param header ROS message header to use for publication. + */ + void publish_annulus_plane(const pcl::PointCloud::Ptr& cloud, + const std_msgs::msg::Header& header) const; + + /** + * @brief Publishes computed valve poses as a PoseArray message. + * @param poses Vector of computed valve poses. + * @param header ROS message header to use for publication. 
+ */ + void publish_valve_poses(const std::vector& poses, + const std_msgs::msg::Header& header) const; + + /** + * @brief Configures message_filters synchronizers based on available + * inputs. + */ + void setup_sync(); + + /** + * @brief Callback for Depth Image + Color Image + Detections + * synchronization. + */ + void di_ci_d_callback( + const sensor_msgs::msg::Image::ConstSharedPtr& depth_image, + const sensor_msgs::msg::Image::ConstSharedPtr& color_image, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections); + + /** + * @brief Callback for Depth Image + Detections synchronization (no color + * image). + */ + void di_d_callback( + const sensor_msgs::msg::Image::ConstSharedPtr& depth_image, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections); + + /** + * @brief Callback for Point Cloud + Color Image + Detections + * synchronization. + */ + void pc_ci_d_callback( + const sensor_msgs::msg::PointCloud2::ConstSharedPtr& pcl, + const sensor_msgs::msg::Image::ConstSharedPtr& color_image, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections); + + /** + * @brief Callback for Point Cloud + Detections synchronization (no color + * image). + */ + void pc_d_callback( + const sensor_msgs::msg::PointCloud2::ConstSharedPtr& pcl, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections); + + /** + * @brief Generic template callback for synchronized inputs. + * + * Supports T = sensor_msgs::msg::Image (depth image) or PointCloud2. + * Handles optional color image and converts messages to OpenCV/PCL formats. + * + * @tparam T Depth input message type. + * @param depth_msg Depth image or point cloud message. + * @param color_image_msg Optional color image message. + * @param detections 2D detection message. 
+ */ + template + void synchronized_callback( + const std::shared_ptr& depth_msg, + const sensor_msgs::msg::Image::ConstSharedPtr& color_image_msg, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections) { + if (!color_image_info_received_) { + return; + } + if (detections->detections.empty() && !visualize_detections_) { + return; + } + + cv::Mat cv_depth_image; + sensor_msgs::msg::PointCloud2 pcl_tf; + cv::Mat cv_color_image; + + if constexpr (std::is_same::value) { + std::string source_frame = depth_msg->header.frame_id; + std::string target_frame = color_image_frame_id_; + + try { + geometry_msgs::msg::TransformStamped transform = + tf_buffer_->lookupTransform( + target_frame, source_frame, + rclcpp::Time(depth_msg->header.stamp)); + + tf2::doTransform(*depth_msg, pcl_tf, transform); + } catch (tf2::TransformException& ex) { + RCLCPP_ERROR(this->get_logger(), + "Failed to transform point cloud: %s", ex.what()); + return; + } + pcl::PointCloud::Ptr cloud( + new pcl::PointCloud); + pcl::fromROSMsg(pcl_tf, *cloud); + + } else if constexpr (std::is_same::value) { + cv_bridge::CvImageConstPtr cv_depth_ptr = + cv_bridge::toCvShare(depth_msg, "32FC1"); + cv_depth_image = cv_depth_ptr->image; + } else { + RCLCPP_ERROR(this->get_logger(), + "Unsupported message type in synchronized_callback."); + return; + } + + if (use_color_image_) { + if (!color_image_msg) { + RCLCPP_ERROR(this->get_logger(), + "Color image message is null."); + return; + } + cv_bridge::CvImageConstPtr cv_ptr = + cv_bridge::toCvShare(color_image_msg, "bgr8"); + + cv_color_image = cv_ptr->image; + } + + std::vector boxes; + boxes.reserve(detections->detections.size()); + for (const auto& detection : detections->detections) { + BoundingBox box = to_bounding_box(detection.bbox); + BoundingBox org_image_box = + valve_detector_->transform_bounding_box(box); + boxes.push_back(org_image_box); + } + + std::vector poses; + poses.reserve(boxes.size()); + + pcl::PointCloud::Ptr all_annulus_cloud( 
+ new pcl::PointCloud); + pcl::PointCloud::Ptr all_annulus_plane_cloud( + new pcl::PointCloud); + + if (calculate_angle_ && debug_visualize_) { + cv::Mat angle_debug_image = cv_color_image.clone(); + + angle_detector_->compute_angles_debug(angle_debug_image, boxes); + sensor_msgs::msg::Image::SharedPtr output_msg = + cv_bridge::CvImage(depth_msg->header, "bgr8", angle_debug_image) + .toImageMsg(); + angle_image_pub_->publish(*output_msg); + } else if (calculate_angle_) { + angle_detector_->compute_angles(cv_color_image, boxes); + } + + if constexpr (std::is_same::value) { + valve_detector_->Compute_valve_poses( + pcl_tf, boxes, poses, all_annulus_cloud, + all_annulus_plane_cloud, debug_visualize_); + } else if constexpr (std::is_same::value) { + valve_detector_->Compute_valve_poses( + cv_depth_image, boxes, poses, all_annulus_cloud, + all_annulus_plane_cloud, debug_visualize_); + } + if (debug_visualize_ && all_annulus_cloud && all_annulus_plane_cloud) { + publish_annulus_pcl(all_annulus_cloud, depth_msg->header); + publish_annulus_plane(all_annulus_plane_cloud, depth_msg->header); + } + if (visualize_detections_) { + cv::Mat visualized_image = + valve_detector_->draw_detections(cv_color_image, boxes, poses); + sensor_msgs::msg::Image::SharedPtr output_msg = + cv_bridge::CvImage(depth_msg->header, "bgr8", visualized_image) + .toImageMsg(); + processed_image_pub_->publish(*output_msg); + } + publish_valve_poses(poses, depth_msg->header); + } + + rclcpp::Subscription::SharedPtr + color_image_info_sub_; + message_filters::Subscriber color_image_sub_; + message_filters::Subscriber pcl_sub_; + message_filters::Subscriber depth_image_sub_; + message_filters::Subscriber + detections_sub_; + + rclcpp::Publisher::SharedPtr + valve_poses_pub_; + rclcpp::Publisher::SharedPtr + annulus_pcl_pub_; + rclcpp::Publisher::SharedPtr + annulus_plane_pub_; + rclcpp::Publisher::SharedPtr processed_image_pub_; + rclcpp::Publisher::SharedPtr angle_image_pub_; + + // Depth image sync 
policies uses ExactTime with the assumption + // that the depth image and color image are from the same camera + // and are synchronized. + + // Policy for Depth Image + Color Image + Detections + typedef message_filters::sync_policies::ExactTime< + sensor_msgs::msg::Image, + sensor_msgs::msg::Image, + vision_msgs::msg::Detection2DArray> + SyncPolicy_DI_CI_D; + + // Policy for Depth Image + Detections (no color image) + typedef message_filters::sync_policies:: + ExactTime + SyncPolicy_DI_D; + + // Policy for Point Cloud + Color Image + Detections + typedef message_filters::sync_policies::ApproximateTime< + sensor_msgs::msg::PointCloud2, + sensor_msgs::msg::Image, + vision_msgs::msg::Detection2DArray> + SyncPolicy_PC_CI_D; + + // Policy for Point Cloud + Detections (no color image) + typedef message_filters::sync_policies::ApproximateTime< + sensor_msgs::msg::PointCloud2, + vision_msgs::msg::Detection2DArray> + SyncPolicy_PC_D; + + std::shared_ptr> + sync_di_ci_d_; + std::shared_ptr> sync_di_d_; + std::shared_ptr> + sync_pc_ci_d_; + std::shared_ptr> sync_pc_d_; + + bool color_image_info_received_ = false; + std::unique_ptr valve_detector_; + std::unique_ptr angle_detector_; + std::string color_image_frame_id_; + std::shared_ptr tf_buffer_; + std::shared_ptr tf_listener_; + + bool visualize_detections_ = true; + bool debug_visualize_ = false; + bool calculate_angle_ = true; + bool use_color_image_ = true; + bool use_depth_image_ = true; +}; + +} // namespace valve_detection + +#endif // VALVE_POSE_DEPTH_ROS_HPP diff --git a/launch/valve_detection.launch.py b/launch/valve_detection.launch.py index 1bacfd0..28ee6b0 100644 --- a/launch/valve_detection.launch.py +++ b/launch/valve_detection.launch.py @@ -1,23 +1,33 @@ import os from ament_index_python.packages import get_package_share_directory -from launch_ros.actions import Node +from launch_ros.actions import ComposableNodeContainer +from launch_ros.descriptions import ComposableNode from launch import 
LaunchDescription +config_file = os.path.join( + get_package_share_directory("valve_detection"), + "config", + "valve_detection_params.yaml", +) + def generate_launch_description(): - valve_detection_node = Node( - package='valve_detection', - executable='valve_detection_node', - name='valve_detection_node', - parameters=[ - os.path.join( - get_package_share_directory('valve_detection'), - 'config', - 'valve_detection_params.yaml', - ) + container = ComposableNodeContainer( + name='valve_detection_container', + namespace='', + package='rclcpp_components', + executable='component_container_mt', + composable_node_descriptions=[ + ComposableNode( + package='valve_detection', + plugin='valve_detection::ValvePoseNode', + name='valve_pose_node', + parameters=[config_file], + ), ], output='screen', ) - return LaunchDescription([valve_detection_node]) + + return LaunchDescription([container]) diff --git a/package.xml b/package.xml index 19b40b4..7c4730e 100644 --- a/package.xml +++ b/package.xml @@ -17,6 +17,7 @@ geometry_msgs std_msgs pcl_conversions + tf2_geometry_msgs ament_lint_auto ament_lint_common diff --git a/src/angle_detector.cpp b/src/angle_detector.cpp new file mode 100644 index 0000000..33bf776 --- /dev/null +++ b/src/angle_detector.cpp @@ -0,0 +1,199 @@ +#include "valve_detection/angle_detector.hpp" +#include +#include "valve_detection/types.hpp" + +namespace valve_detection { + +void AngleDetector::compute_angles(const cv::Mat& color_image, + std::vector& boxes) const { + for (auto& box : boxes) { + double width = box.size_x; + double height = box.size_y; + double center_x = box.center_x; + double center_y = box.center_y; + + int x1 = std::max( + static_cast(center_x - width * params_.line_detection_area), + 0); + int y1 = std::max( + static_cast(center_y - height * params_.line_detection_area), + 0); + int x2 = std::min( + static_cast(center_x + width * params_.line_detection_area), + color_image.cols - 1); + int y2 = std::min( + static_cast(center_y + 
height * params_.line_detection_area), + color_image.rows - 1); + + if (x1 >= x2 || y1 >= y2) { + box.theta = std::numeric_limits::quiet_NaN(); + continue; + } + + cv::Mat roi = color_image(cv::Rect(x1, y1, x2 - x1, y2 - y1)); + + cv::Mat mask = cv::Mat::zeros(roi.size(), CV_8UC1); + cv::Point center((roi.cols / 2), (roi.rows / 2)); + cv::Size axes(roi.cols / 2, roi.rows / 2); + cv::ellipse(mask, center, axes, 0, 0, 360, cv::Scalar(255), -1); + + cv::Mat masked_roi; + roi.copyTo(masked_roi, mask); + + cv::Mat gray; + cv::cvtColor(masked_roi, gray, cv::COLOR_BGR2GRAY); + cv::Mat edges = apply_canny_edge_detection(gray); + + std::vector lines; + cv::HoughLinesP(edges, lines, params_.hough_rho_res, + params_.hough_theta_res, params_.hough_threshold, + params_.hough_min_line_length, + params_.hough_max_line_gap); + + if (lines.empty()) { + box.theta = std::numeric_limits::quiet_NaN(); + continue; + } + + cv::Vec4i longest_line = find_longest_line(lines); + + if (longest_line[0] >= longest_line[2]) { + int temp_x = longest_line[0]; + int temp_y = longest_line[1]; + longest_line[0] = longest_line[2]; + longest_line[1] = longest_line[3]; + longest_line[2] = temp_x; + longest_line[3] = temp_y; + } + + float theta = std::atan2(longest_line[3] - longest_line[1], + longest_line[2] - longest_line[0]); + + if (theta < 0) { + theta += CV_PI; + } + if (theta > CV_PI) { + theta -= CV_PI; + } + box.theta = theta; + } +} + +void AngleDetector::compute_angles_debug( + cv::Mat& image_to_draw_on, + std::vector& boxes) const { + const cv::Scalar ALL_LINES_COLOR(255, 0, 0); + const cv::Scalar LONGEST_LINE_COLOR(0, 255, 0); + const int LINE_THICKNESS = 2; + + cv::Mat output_image = cv::Mat::zeros(image_to_draw_on.size(), CV_8UC3); + + for (auto& box : boxes) { + double width = box.size_x; + double height = box.size_y; + double center_x = box.center_x; + double center_y = box.center_y; + + int x1 = std::max( + static_cast(center_x - width * params_.line_detection_area), + 0); + int y1 = 
std::max( + static_cast(center_y - height * params_.line_detection_area), + 0); + int x2 = std::min( + static_cast(center_x + width * params_.line_detection_area), + image_to_draw_on.cols - 1); + int y2 = std::min( + static_cast(center_y + height * params_.line_detection_area), + image_to_draw_on.rows - 1); + + if (x1 >= x2 || y1 >= y2) { + box.theta = std::numeric_limits::quiet_NaN(); + continue; + } + + cv::Mat roi = image_to_draw_on(cv::Rect(x1, y1, x2 - x1, y2 - y1)); + + cv::Mat mask = cv::Mat::zeros(roi.size(), CV_8UC1); + cv::Point center((roi.cols / 2), (roi.rows / 2)); + cv::Size axes(roi.cols / 2, roi.rows / 2); + cv::ellipse(mask, center, axes, 0, 0, 360, cv::Scalar(255), -1); + + cv::Mat masked_roi; + roi.copyTo(masked_roi, mask); + + cv::Mat gray; + cv::cvtColor(masked_roi, gray, cv::COLOR_BGR2GRAY); + cv::Mat edges = apply_canny_edge_detection(gray); + + std::vector lines; + cv::HoughLinesP(edges, lines, params_.hough_rho_res, + params_.hough_theta_res, params_.hough_threshold, + params_.hough_min_line_length, + params_.hough_max_line_gap); + + if (lines.empty()) { + box.theta = std::numeric_limits::quiet_NaN(); + + cv::Mat output_roi_gray = + output_image(cv::Rect(x1, y1, x2 - x1, y2 - y1)); + cv::Mat edges_color; + cv::cvtColor(edges, edges_color, cv::COLOR_GRAY2BGR); + edges_color.copyTo(output_roi_gray, mask); + + continue; + } + + cv::Mat output_roi = output_image(cv::Rect(x1, y1, x2 - x1, y2 - y1)); + cv::Mat edges_color; + cv::cvtColor(edges, edges_color, cv::COLOR_GRAY2BGR); + edges_color.copyTo(output_roi, mask); + + const cv::Point offset(x1, y1); + for (const auto& line : lines) { + cv::line(output_image, cv::Point(line[0], line[1]) + offset, + cv::Point(line[2], line[3]) + offset, ALL_LINES_COLOR, + LINE_THICKNESS); + } + + cv::Vec4i longest_line = find_longest_line(lines); + cv::line(output_image, + cv::Point(longest_line[0], longest_line[1]) + offset, + cv::Point(longest_line[2], longest_line[3]) + offset, + LONGEST_LINE_COLOR, 
LINE_THICKNESS); + + box.theta = std::atan2(longest_line[3] - longest_line[1], + longest_line[2] - longest_line[0]); + } + + image_to_draw_on = output_image; +} + +cv::Vec4i AngleDetector::find_longest_line( + const std::vector& lines) const { + if (lines.empty()) { + return cv::Vec4i(0, 0, 0, 0); + } + + cv::Vec4i longest_line = lines[0]; + double max_length = 0; + + for (const auto& line : lines) { + double length = std::hypot(line[2] - line[0], line[3] - line[1]); + if (length > max_length) { + max_length = length; + longest_line = line; + } + } + return longest_line; +} + +cv::Mat AngleDetector::apply_canny_edge_detection( + const cv::Mat& gray_image) const { + cv::Mat edges; + cv::Canny(gray_image, edges, params_.canny_low_threshold, + params_.canny_high_threshold, params_.canny_aperture_size); + return edges; +} + +} // namespace valve_detection diff --git a/src/depth_image_processing.cpp b/src/depth_image_processing.cpp new file mode 100644 index 0000000..c76fee4 --- /dev/null +++ b/src/depth_image_processing.cpp @@ -0,0 +1,88 @@ +#include "valve_detection/depth_image_processing.hpp" + +namespace valve_detection { + +void project_pixel_to_point(int u, + int v, + float depth, + double fx, + double fy, + double cx, + double cy, + pcl::PointXYZ& point) { + if (depth <= 0 || depth == std::numeric_limits::infinity() || + std::isnan(depth)) { + point.x = point.y = point.z = std::numeric_limits::quiet_NaN(); + return; + } + + point.x = (u - cx) * depth / fx; + point.y = (v - cy) * depth / fy; + point.z = depth; +} + +void extract_annulus_pcl(const cv::Mat& depth_image, + const BoundingBox& bbox, + const ImageProperties& image_properties, + float annulus_radius_ratio, + pcl::PointCloud::Ptr& cloud) { + float center_x = bbox.center_x; + float center_y = bbox.center_y; + float outer_radius_x = bbox.size_x / 2.0f; + float outer_radius_y = bbox.size_y / 2.0f; + + float inner_radius_x = outer_radius_x * annulus_radius_ratio; + float inner_radius_y = outer_radius_y * 
annulus_radius_ratio; + + int upper_bound_size = static_cast( + M_PI * + ((outer_radius_x * outer_radius_y - inner_radius_x * inner_radius_y) + + 1)); + + cloud->clear(); + cloud->reserve(upper_bound_size); + + for (int y = static_cast(-outer_radius_y); + y <= static_cast(outer_radius_y); ++y) { + for (int x = static_cast(-outer_radius_x); + x <= static_cast(outer_radius_x); ++x) { + float dx_scaled_outer = x / outer_radius_x; + float dy_scaled_outer = y / outer_radius_y; + bool is_inside_outer_ellipse = + (dx_scaled_outer * dx_scaled_outer + + dy_scaled_outer * dy_scaled_outer) <= 1.0f; + + float dx_scaled_inner = x / inner_radius_x; + float dy_scaled_inner = y / inner_radius_y; + bool is_outside_inner_ellipse = + (dx_scaled_inner * dx_scaled_inner + + dy_scaled_inner * dy_scaled_inner) > 1.0f; + + if (is_inside_outer_ellipse && is_outside_inner_ellipse) { + int u = static_cast(center_x + x); + int v = static_cast(center_y + y); + + if (u >= 0 && u < depth_image.cols && v >= 0 && + v < depth_image.rows) { + float depth_value = depth_image.at(v, u); + pcl::PointXYZ point; + project_pixel_to_point( + u, v, depth_value, image_properties.intr.fx, + image_properties.intr.fy, image_properties.intr.cx, + image_properties.intr.cy, point); + + if (!std::isnan(point.x) && !std::isnan(point.y) && + !std::isnan(point.z)) { + cloud->points.push_back(point); + } + } + } + } + } + + cloud->width = static_cast(cloud->points.size()); + cloud->height = 1; + cloud->is_dense = false; +} + +} // namespace valve_detection diff --git a/src/pointcloud_processing.cpp b/src/pointcloud_processing.cpp new file mode 100644 index 0000000..707aa69 --- /dev/null +++ b/src/pointcloud_processing.cpp @@ -0,0 +1,59 @@ +#include "valve_detection/pointcloud_processing.hpp" + +namespace valve_detection { + +void extract_annulus_pcl(const pcl::PointCloud::Ptr& input_cloud, + const BoundingBox& bbox, + const ImageProperties& image_properties, + float annulus_radius_ratio, + pcl::PointCloud::Ptr& cloud) { 
+ float center_x = bbox.center_x; + float center_y = bbox.center_y; + float outer_radius_x = bbox.size_x / 2.0f; + float outer_radius_y = bbox.size_y / 2.0f; + + float inner_radius_x = outer_radius_x * annulus_radius_ratio; + float inner_radius_y = outer_radius_y * annulus_radius_ratio; + + double fx = image_properties.intr.fx; + double fy = image_properties.intr.fy; + double cx = image_properties.intr.cx; + double cy = image_properties.intr.cy; + + cloud->clear(); + cloud->points.reserve(input_cloud->points.size()); + + for (const auto& point_in : input_cloud->points) { + if (std::isnan(point_in.z)) { + continue; + } + + int u = static_cast((point_in.x * fx / point_in.z) + cx); + int v = static_cast((point_in.y * fy / point_in.z) + cy); + + if (u >= center_x - outer_radius_x && u <= center_x + outer_radius_x && + v >= center_y - outer_radius_y && v <= center_y + outer_radius_y) { + float dx_scaled_outer = (u - center_x) / outer_radius_x; + float dy_scaled_outer = (v - center_y) / outer_radius_y; + bool is_inside_outer_ellipse = + (dx_scaled_outer * dx_scaled_outer + + dy_scaled_outer * dy_scaled_outer) <= 1.0f; + + float dx_scaled_inner = (u - center_x) / inner_radius_x; + float dy_scaled_inner = (v - center_y) / inner_radius_y; + bool is_outside_inner_ellipse = + (dx_scaled_inner * dx_scaled_inner + + dy_scaled_inner * dy_scaled_inner) > 1.0f; + + if (is_inside_outer_ellipse && is_outside_inner_ellipse) { + cloud->points.push_back(point_in); + } + } + } + + cloud->width = static_cast(cloud->points.size()); + cloud->height = 1; + cloud->is_dense = false; +} + +} // namespace valve_detection diff --git a/src/valve_detection.cpp b/src/valve_detection.cpp deleted file mode 100644 index 312bed9..0000000 --- a/src/valve_detection.cpp +++ /dev/null @@ -1,760 +0,0 @@ -#include - -using std::placeholders::_1; -using std::placeholders::_2; -using std::placeholders::_3; - -ValveDetectionNode::ValveDetectionNode(const rclcpp::NodeOptions& options) - : 
Node("valve_detection_node", options) { - auto depth_image_sub_topic = - this->declare_parameter("depth_image_sub_topic"); - auto color_image_sub_topic = - this->declare_parameter("color_image_sub_topic"); - auto detections_sub_topic = - this->declare_parameter("detections_sub_topic"); - - rclcpp::QoS qos = rclcpp::QoS(rclcpp::KeepLast(10)) - .reliability(RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT); - depth_image_sub_.subscribe(this, depth_image_sub_topic, - qos.get_rmw_qos_profile()); - color_image_sub_.subscribe(this, color_image_sub_topic, - qos.get_rmw_qos_profile()); - detections_sub_.subscribe(this, detections_sub_topic, - qos.get_rmw_qos_profile()); - - sync_ = std::make_shared>( - MySyncPolicy(10), depth_image_sub_, color_image_sub_, detections_sub_); - sync_->registerCallback(std::bind( - &ValveDetectionNode::synchronized_callback, this, _1, _2, _3)); - - // Create subscriptions with synchronized callbacks - camera_info_subscription_yolo_color_ = - this->create_subscription( - "/yolov8_encoder/resize/camera_info", 10, - std::bind(&ValveDetectionNode::camera_info_callback_yolo_colored, - this, std::placeholders::_1)); - - // Create subscriptions with synchronized callbacks - camera_info_subscription_ = - this->create_subscription( - "/zed_node/depth/camera_info", 10, - std::bind(&ValveDetectionNode::camera_info_callback, this, - std::placeholders::_1)); - - processed_image_pub_ = this->create_publisher( - "yolov8_valve_detection_image", 10); - plane_normal_pub_ = - this->create_publisher("plane_normal", 10); - pointcloud_pub_ = this->create_publisher( - "point_cloud", 10); - line_pose_pub_ = this->create_publisher( - "line_pose", 10); - line_points_pub_ = this->create_publisher( - "line_points", 10); - near_plane_cloud_pub_ = - this->create_publisher( - "near_plane_cloud", 10); -} - -Eigen::Vector3f ValveDetectionNode::filter_direction_ = - Eigen::Vector3f(1, 0, 0); - -void ValveDetectionNode::camera_info_callback( - const 
sensor_msgs::msg::CameraInfo::SharedPtr camera_info_msg) { - if (!camera_info_received_) { - camera_info_ = camera_info_msg; - camera_info_received_ = true; - RCLCPP_INFO(this->get_logger(), - "Camera resize info received and stored."); - - compute_height_width_scalars(); - // unsubscribe to avoid further callback executions - camera_info_subscription_.reset(); - } -} - -void ValveDetectionNode::camera_info_callback_yolo_colored( - const sensor_msgs::msg::CameraInfo::SharedPtr camera_info_msg) { - if (!camera_info_received_yolo_color_) { - camera_info_yolo_color_ = camera_info_msg; - camera_info_received_yolo_color_ = true; - RCLCPP_INFO(this->get_logger(), - "Camera yolo color info received and stored."); - - compute_height_width_scalars(); - // unsubscribe to avoid further callback executions - camera_info_subscription_yolo_color_.reset(); - } -} - -void ValveDetectionNode::compute_height_width_scalars() { - if (camera_info_received_ && camera_info_received_yolo_color_) { - height_scalar_ = - static_cast(camera_info_yolo_color_->height - 280) / - camera_info_->height; - width_scalar_ = static_cast(camera_info_yolo_color_->width) / - camera_info_->width; - - RCLCPP_INFO(this->get_logger(), "Height scalar: %.3f", height_scalar_); - RCLCPP_INFO(this->get_logger(), "Width scalar: %.3f", width_scalar_); - } -} - -void ValveDetectionNode::project_pixel_to_3d(int u, - int v, - float depth, - pcl::PointXYZ& point) { - if (!camera_info_ || depth <= 0) { - return; // Skip invalid depth or missing camera info - } - - // Extract intrinsic parameters - double fx = camera_info_->k[0]; // k[0] = fx - double fy = camera_info_->k[4]; // k[4] = fy - double cx = camera_info_->k[2]; // k[2] = cx - double cy = camera_info_->k[5]; // k[5] = cy - - // Calculate 3D point coordinates - // transform from optical to camera frame - point.x = depth; - point.y = -(u - cx) * depth / fx; - point.z = -(v - cy) * depth / fy; -} - -void ValveDetectionNode::project_3d_to_pixel(float x, - float y, - 
float z, - int& u, - int& v) { - if (!camera_info_ || z <= 0) { - return; // Ensure camera info is available and z is positive - } - - // Extract intrinsic parameters - double fx = camera_info_->k[0]; // Focal length in x direction - double fy = camera_info_->k[4]; // Focal length in y direction - double cx = camera_info_->k[2]; // Principal point in x direction - double cy = camera_info_->k[5]; // Principal point in y direction - - // Project the 3D point to 2D pixel coordinates - // transform from camera to optical frame - u = static_cast((-y * fx) / x + cx); - v = static_cast((-z * fy) / x + cy); -} - -void ValveDetectionNode::synchronized_callback( - const sensor_msgs::msg::Image::ConstSharedPtr& depth_image, - const sensor_msgs::msg::Image::ConstSharedPtr& color_image, - const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections) { - RCLCPP_INFO(this->get_logger(), - "Synchronized image and detection messages received."); - - // process_and_publish_image(depth_image, color_image, detections); - process_and_publish_image(depth_image, detections); -} - -// void ValveDetectionNode::process_and_publish_image( -// const sensor_msgs::msg::Image::ConstSharedPtr & depth_image, -// const sensor_msgs::msg::Image::ConstSharedPtr & color_image, -// const vision_msgs::msg::Detection2DArray::ConstSharedPtr & detections) -// { -// if (!camera_info_received_ || !camera_info_received_yolo_color_) -// { -// return; -// } - -// try -// { -// // Convert depth and color images -// cv::Mat cv_depth_image = cv_bridge::toCvCopy(depth_image, -// "32FC1")->image; cv::Mat cv_color_image = -// cv_bridge::toCvCopy(color_image, "bgr8")->image; - -// if (!detections->detections.empty()) -// { -// const auto *first_detection = &detections->detections[0]; - -// // The most centred of the two most likely variables are chosen -// as a boundingbox. 
-// // Calculate distance to center (cx, cy) -// double cx = camera_info_->k[2]; // k[2] = cx -// double cy = camera_info_->k[5]; // k[5] = cy -// if (detections->detections.size() > 1) -// { -// float dist_first = -// std::sqrt(std::pow(detections->detections[0].bbox.center.position.x -// - cx, 2) + -// std::pow(detections->detections[0].bbox.center.position.y -// - cy, 2)); - -// float dist_second = -// std::sqrt(std::pow(detections->detections[1].bbox.center.position.x -// - cx, 2) + -// std::pow(detections->detections[1].bbox.center.position.y -// - cy, 2)); - -// // Select the detection that is closer to the center. -// if (dist_first > dist_second) { -// first_detection = &detections->detections[1]; -// } -// } - -// int center_x = -// static_cast(first_detection->bbox.center.position.x / -// this->width_scalar_); int center_y = -// static_cast((first_detection->bbox.center.position.y - 140) -// / this->height_scalar_); int width = -// static_cast(first_detection->bbox.size_x / -// this->width_scalar_); int height = -// static_cast(first_detection->bbox.size_y / -// this->height_scalar_); - -// pcl::PointCloud::Ptr cloud(new -// pcl::PointCloud); bool points_added = false; - -// for (int y = center_y - (height / 2); y <= center_y + (height / -// 2); ++y) -// { -// for (int x = center_x - (width / 2); x <= center_x + (width / -// 2); ++x) -// { -// int dx = x - center_x; -// int dy = y - center_y; -// if (dx * dx / (float)((width / 2) * (width / 2)) + dy * -// dy / (float)((height / 2) * (height / 2)) <= 1.0) -// { -// if (y >= 0 && y < cv_depth_image.rows && x >= 0 && x -// < cv_depth_image.cols) -// { -// float depth_value = cv_depth_image.at(y, -// x); pcl::PointXYZ point; project_pixel_to_3d(x, -// y, depth_value, point); -// // if (y == center_y && x == center_x) -// // { -// // RCLCPP_INFO(this->get_logger(), "center 2d -// point: (%d, %d, %f)", x, y, depth_value); -// // RCLCPP_INFO(this->get_logger(), "Center 3d -// point: (%.3f, %.3f, %.3f)", point.x, 
point.y, -// point.z); -// // } -// if (depth_value > 0) // Check for valid depth -// { -// cloud->points.push_back(point); -// points_added = true; -// } -// } -// } -// } -// } - -// if (points_added && cloud->points.size() > 0) -// { - -// sensor_msgs::msg::PointCloud2 cloud_msg; -// pcl::toROSMsg(*cloud, cloud_msg); -// cloud_msg.header.stamp = depth_image->header.stamp; -// cloud_msg.header.frame_id = "zed_left_camera_frame"; - -// pointcloud_pub_->publish(cloud_msg); - -// pcl::SACSegmentation seg; -// seg.setOptimizeCoefficients(true); -// seg.setModelType(pcl::SACMODEL_PLANE); -// seg.setMethodType(pcl::SAC_RANSAC); -// seg.setDistanceThreshold(0.01); - -// pcl::ModelCoefficients::Ptr coefficients(new -// pcl::ModelCoefficients); pcl::PointIndices::Ptr inliers(new -// pcl::PointIndices); seg.setInputCloud(cloud); -// seg.segment(*inliers, *coefficients); - -// if (inliers->indices.size() > 0) -// { -// geometry_msgs::msg::Vector3 normal_msg; -// normal_msg.x = coefficients->values[0]; -// normal_msg.y = coefficients->values[1]; -// normal_msg.z = coefficients->values[2]; -// plane_normal_pub_->publish(normal_msg); - -// RCLCPP_INFO(this->get_logger(), "RANSAC successful: Plane -// normal (%.3f, %.3f, %.3f)", -// coefficients->values[0], -// coefficients->values[1], -// coefficients->values[2]); - -// pcl::PointCloud::Ptr near_plane_cloud(new -// pcl::PointCloud); - -// // Select points within a distance threshold of the plane -// model pcl::ExtractIndices extract; -// extract.setInputCloud(cloud); -// extract.setIndices(inliers); -// extract.setNegative(false); // Select the inliers -// extract.filter(*near_plane_cloud); - -// // Now we can use `selectWithinDistance` to select -// inliers that are within the distance threshold -// pcl::PointIndices::Ptr selected_inliers(new -// pcl::PointIndices()); float threshold_distance = 0.02; // -// Example threshold (3 cm) - -// // Logic for selecting points close to the -// threshold_distance of the plane. 
for (size_t i = 0; i < -// cloud->points.size(); ++i) { -// const auto& point = cloud->points[i]; -// float distance = std::abs(coefficients->values[0] * -// point.x + -// coefficients->values[1] * -// point.y + -// coefficients->values[2] * -// point.z + -// coefficients->values[3]) / -// std::sqrt(coefficients->values[0] * -// coefficients->values[0] + -// coefficients->values[1] * -// coefficients->values[1] + -// coefficients->values[2] * -// coefficients->values[2]); -// if (distance < threshold_distance) { -// selected_inliers->indices.push_back(i); -// } -// } -// RCLCPP_INFO(this->get_logger(), "Selected inlier size: -// %zu", selected_inliers->indices.size()); - -// // Extract the selected inliers -// pcl::PointCloud::Ptr -// final_inliers_cloud(new pcl::PointCloud); - -// for (const auto& idx : selected_inliers->indices) -// { -// // Convert 3D point (x, y) to image space (u, v) -// int u, v; -// const auto& point = cloud->points[idx]; -// project_3d_to_pixel(point.x, point.y, point.z, u, -// v); - -// // Check if the point (u, v) is inside the -// smaller ellipse int dx = u - center_x; int dy = v -// - center_y; - -// // Check if point is inside the smaller ellipse -// in image space float a = -// static_cast(width) / 6.0f; // Semi-major -// axis float b = static_cast(height) / 6.0f; -// // Semi-minor axis - -// if ((dx * dx) / (a * a) + (dy * dy) / (b * b) -// <= 1.0f) { -// // Point is inside the ellipse, add it to the -// near_plane_cloud -// final_inliers_cloud->points.push_back(cloud->points[idx]); -// } - -// } - -// near_plane_cloud = final_inliers_cloud; - -// // Calculate centroid position from near_plane_cloud -// Eigen::Vector3f centroid(0, 0, 0); -// if (!near_plane_cloud->empty()) { -// for (const auto& point : near_plane_cloud->points) { -// centroid += Eigen::Vector3f(point.x, point.y, -// point.z); -// } -// centroid /= near_plane_cloud->points.size(); -// } else { -// RCLCPP_WARN(this->get_logger(), "No valid near plane -// points 
found."); return; -// } - -// int x1 = std::max(center_x - width / 2, 0); -// int y1 = std::max(center_y - height / 2, 0); -// int x2 = std::min(center_x + width / 2, cv_color_image.cols - -// 1); int y2 = std::min(center_y + height / 2, -// cv_color_image.rows - 1); - -// // Extract ROI (region of interest) from the color image -// cv::Mat roi = cv_color_image(cv::Rect(x1, y1, x2 - x1, y2 - -// y1)); - -// // Convert to grayscale for edge detection -// cv::Mat gray; -// cv::cvtColor(roi, gray, cv::COLOR_BGR2GRAY); - -// // Apply Canny edge detection -// cv::Mat edges; -// cv::Canny(gray, edges, 50, 150, 3); - -// // Detect lines using Hough Transform -// std::vector lines; -// cv::HoughLinesP(edges, lines, 1, CV_PI / 180, 50, 50, 10); - -// if (!lines.empty()) -// { -// cv::Vec4i longest_line = lines[0]; -// double max_length = 0; - -// for (const auto &line : lines) -// { -// double length = std::hypot(line[2] - line[0], line[3] -// - line[1]); if (length > max_length) -// { -// max_length = length; -// longest_line = line; -// } -// } - -// double angle = std::atan2(longest_line[3] - -// longest_line[1], longest_line[2] - longest_line[0]); - -// // ANGLE TO QUATERNION WITH RESPECT TO PLANE NORMAL. 
- -// // Publish detected line -// geometry_msgs::msg::PoseStamped line_pose_msg; -// line_pose_msg.header.stamp = depth_image->header.stamp; -// line_pose_msg.header.frame_id = "zed_left_camera_frame"; -// line_pose_msg.pose.position.x = centroid.x(); -// line_pose_msg.pose.position.y = centroid.y(); -// line_pose_msg.pose.position.z = centroid.z(); - -// Eigen::Quaternionf quaternion; -// quaternion.setFromTwoVectors(Eigen::Vector3f(1, 0, 0), -// line_direction.normalized()); - -// if (std::isnan(quaternion.x()) || -// std::isnan(quaternion.y()) || -// std::isnan(quaternion.z()) || -// std::isnan(quaternion.w())) { -// RCLCPP_WARN(this->get_logger(), "Invalid quaternion -// computed, skipping pose publishing."); return; -// } - -// line_pose_msg.pose.orientation.x = quaternion.x(); -// line_pose_msg.pose.orientation.y = quaternion.y(); -// line_pose_msg.pose.orientation.z = quaternion.z(); -// line_pose_msg.pose.orientation.w = quaternion.w(); - -// line_pose_pub_->publish(line_pose_msg); - -// RCLCPP_INFO(this->get_logger(), "Detected line -// published."); -// } -// else -// { -// RCLCPP_WARN(this->get_logger(), "No lines detected in -// bounding box."); -// } -// } -// else -// { -// RCLCPP_WARN(this->get_logger(), "No plane inliers found."); -// } -// } -// } -// } -// catch (cv_bridge::Exception &e) -// { -// RCLCPP_ERROR(this->get_logger(), "CvBridge Error: %s", e.what()); -// } -// } - -void ValveDetectionNode::process_and_publish_image( - const sensor_msgs::msg::Image::ConstSharedPtr& image, - const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections) { - std::string target_frame = "zed_left_camera_frame"; - if (!camera_info_received_ || !camera_info_received_yolo_color_) { - return; - } - - try { - cv::Mat cv_image = cv_bridge::toCvCopy(image, "32FC1")->image; - - if (!detections->detections.empty()) { - const auto* first_detection = &detections->detections[0]; - // The most centred of the two most likely variables are chosen as a - // 
boundingbox. Calculate distance to center (cx, cy) - double cx = camera_info_->k[2]; // k[2] = cx - double cy = camera_info_->k[5]; // k[5] = cy - if (detections->detections.size() > 1) { - float dist_first = std::sqrt( - std::pow( - detections->detections[0].bbox.center.position.x - cx, - 2) + - std::pow( - detections->detections[0].bbox.center.position.y - cy, - 2)); - - float dist_second = std::sqrt( - std::pow( - detections->detections[1].bbox.center.position.x - cx, - 2) + - std::pow( - detections->detections[1].bbox.center.position.y - cy, - 2)); - - // Select the detection that is closer to the center. - if (dist_first > dist_second) { - first_detection = &detections->detections[1]; - } - } - - int center_x = static_cast( - first_detection->bbox.center.position.x / this->width_scalar_); - int center_y = static_cast( - (first_detection->bbox.center.position.y - 140) / - this->height_scalar_); - int width = static_cast(first_detection->bbox.size_x / - this->width_scalar_); - int height = static_cast(first_detection->bbox.size_y / - this->height_scalar_); - - pcl::PointCloud::Ptr cloud( - new pcl::PointCloud); - bool points_added = false; - - for (int y = center_y - (height / 2); y <= center_y + (height / 2); - ++y) { - for (int x = center_x - (width / 2); - x <= center_x + (width / 2); ++x) { - int dx = x - center_x; - int dy = y - center_y; - if (dx * dx / (float)((width / 2) * (width / 2)) + - dy * dy / (float)((height / 2) * (height / 2)) <= - 1.0) { - if (y >= 0 && y < cv_image.rows && x >= 0 && - x < cv_image.cols) { - float depth_value = cv_image.at(y, x); - pcl::PointXYZ point; - project_pixel_to_3d(x, y, depth_value, point); - // if (y == center_y && x == center_x) - // { - // RCLCPP_INFO(this->get_logger(), "center 2d - // point: (%d, %d, %f)", x, y, depth_value); - // RCLCPP_INFO(this->get_logger(), "Center 3d - // point: (%.3f, %.3f, %.3f)", point.x, point.y, - // point.z); - // } - if (depth_value > 0) // Check for valid depth - { - 
cloud->points.push_back(point); - points_added = true; - } - } - } - } - } - - if (points_added && cloud->points.size() > 0) { - sensor_msgs::msg::PointCloud2 cloud_msg; - pcl::toROSMsg(*cloud, cloud_msg); - cloud_msg.header.stamp = image->header.stamp; - cloud_msg.header.frame_id = target_frame; - - pointcloud_pub_->publish(cloud_msg); - - pcl::SACSegmentation seg; - seg.setOptimizeCoefficients(true); - seg.setModelType(pcl::SACMODEL_PLANE); - seg.setMethodType(pcl::SAC_RANSAC); - seg.setDistanceThreshold(0.01); - - pcl::ModelCoefficients::Ptr coefficients( - new pcl::ModelCoefficients); - pcl::PointIndices::Ptr inliers(new pcl::PointIndices); - seg.setInputCloud(cloud); - seg.segment(*inliers, *coefficients); - - if (inliers->indices.size() > 0) { - geometry_msgs::msg::Vector3 normal_msg; - normal_msg.x = coefficients->values[0]; - normal_msg.y = coefficients->values[1]; - normal_msg.z = coefficients->values[2]; - plane_normal_pub_->publish(normal_msg); - - RCLCPP_INFO( - this->get_logger(), - "RANSAC successful: Plane normal (%.3f, %.3f, %.3f)", - coefficients->values[0], coefficients->values[1], - coefficients->values[2]); - - pcl::PointCloud::Ptr near_plane_cloud( - new pcl::PointCloud); - - // Select points within a distance threshold of the plane - // model - pcl::ExtractIndices extract; - extract.setInputCloud(cloud); - extract.setIndices(inliers); - extract.setNegative(false); // Select the inliers - extract.filter(*near_plane_cloud); - - // Now we can use `selectWithinDistance` to select inliers - // that are within the distance threshold - pcl::PointIndices::Ptr selected_inliers( - new pcl::PointIndices()); - float threshold_distance = - 0.02; // Example threshold (3 cm) - - // Logic for selecting points close to the - // threshold_distance of the plane. 
- for (size_t i = 0; i < cloud->points.size(); ++i) { - const auto& point = cloud->points[i]; - float distance = - std::abs(coefficients->values[0] * point.x + - coefficients->values[1] * point.y + - coefficients->values[2] * point.z + - coefficients->values[3]) / - std::sqrt(coefficients->values[0] * - coefficients->values[0] + - coefficients->values[1] * - coefficients->values[1] + - coefficients->values[2] * - coefficients->values[2]); - if (distance < threshold_distance) { - selected_inliers->indices.push_back(i); - } - } - RCLCPP_INFO(this->get_logger(), "Selected inlier size: %zu", - selected_inliers->indices.size()); - - // Extract the selected inliers - pcl::PointCloud::Ptr final_inliers_cloud( - new pcl::PointCloud); - pcl::PointCloud::Ptr near_plane_cloud_copy( - new pcl::PointCloud); - - for (const auto& idx : selected_inliers->indices) { - // Convert 3D point (x, y) to image space (u, v) - int u, v; - const auto& point = cloud->points[idx]; - project_3d_to_pixel(point.x, point.y, point.z, u, v); - - // Check if the point (u, v) is inside the smaller - // ellipse - int dx = u - center_x; - int dy = v - center_y; - - // Check if point is inside the smaller ellipse in image - // space - float a = static_cast(width) / - 6.0f; // Semi-major axis - float b = static_cast(height) / - 6.0f; // Semi-minor axis - - if ((dx * dx) / (a * a) + (dy * dy) / (b * b) <= 1.0f) { - // Point is inside the ellipse, add it to the - // near_plane_cloud - final_inliers_cloud->points.push_back( - cloud->points[idx]); - } - } - - near_plane_cloud_copy = near_plane_cloud; - near_plane_cloud = final_inliers_cloud; - - RCLCPP_INFO(this->get_logger(), - "near_plane_points size: %zu", - near_plane_cloud->points.size()); - - if (near_plane_cloud->points.size() > 0) { - sensor_msgs::msg::PointCloud2 near_plane_cloud_msg; - pcl::toROSMsg(*near_plane_cloud, near_plane_cloud_msg); - near_plane_cloud_msg.header.stamp = image->header.stamp; - near_plane_cloud_msg.header.frame_id = 
target_frame; - near_plane_cloud_pub_->publish( - near_plane_cloud_msg); // Publish near plane cloud - - Eigen::Vector4f centroid; - pcl::compute3DCentroid(*near_plane_cloud, centroid); - - Eigen::Matrix3f covariance; - pcl::computeCovarianceMatrixNormalized( - *near_plane_cloud, centroid, covariance); - - Eigen::SelfAdjointEigenSolver - eigen_solver(covariance, - Eigen::ComputeEigenvectors); - - Eigen::Vector3f line_direction = - eigen_solver.eigenvectors().col(2); - - // Filter the line direction to maintain consistency - if (filter_direction_.dot(line_direction) < 0) { - line_direction = - -line_direction; // Flip to maintain - // consistency - } - - filter_direction_ = line_direction; - - geometry_msgs::msg::PoseStamped line_pose_msg; - line_pose_msg.header.stamp = image->header.stamp; - line_pose_msg.header.frame_id = target_frame; - - line_pose_msg.pose.position.x = centroid[0]; - line_pose_msg.pose.position.y = centroid[1]; - line_pose_msg.pose.position.z = centroid[2]; - - Eigen::Quaternionf quaternion; - quaternion.setFromTwoVectors(Eigen::Vector3f(1, 0, 0), - line_direction); - - line_pose_msg.pose.orientation.x = quaternion.x(); - line_pose_msg.pose.orientation.y = quaternion.y(); - line_pose_msg.pose.orientation.z = quaternion.z(); - line_pose_msg.pose.orientation.w = quaternion.w(); - - line_pose_pub_->publish(line_pose_msg); - RCLCPP_INFO(this->get_logger(), - "Line fitted and published."); - - // Publish line points as a PointCloud2 message - pcl::PointCloud::Ptr line_cloud( - new pcl::PointCloud); - const int num_points = - 100; // Number of points along the line - - for (int i = -num_points / 2; i <= num_points / 2; - ++i) { - pcl::PointXYZ line_point; - float scale = - i * 0.01; // Adjust the scale for length - - line_point.x = - centroid[0] + scale * line_direction[0]; - line_point.y = - centroid[1] + scale * line_direction[1]; - line_point.z = - centroid[2] + scale * line_direction[2]; - line_cloud->points.push_back(line_point); - } - - // 
Convert to ROS message and publish - sensor_msgs::msg::PointCloud2 line_cloud_msg; - pcl::toROSMsg(*line_cloud, line_cloud_msg); - line_cloud_msg.header.stamp = image->header.stamp; - line_cloud_msg.header.frame_id = target_frame; - line_points_pub_->publish(line_cloud_msg); - - // RCLCPP_INFO(this->get_logger(), "Line points - // published as PointCloud2."); - } - } else { - RCLCPP_WARN(this->get_logger(), "No plane inliers found."); - } - } else { - RCLCPP_WARN(this->get_logger(), - "No valid points found for RANSAC."); - } - } - - auto processed_msg = - cv_bridge::CvImage(image->header, "32FC1", cv_image).toImageMsg(); - processed_image_pub_->publish(*processed_msg); - } catch (cv_bridge::Exception& e) { - RCLCPP_ERROR(this->get_logger(), "CvBridge Error: %s", e.what()); - } -} - -RCLCPP_COMPONENTS_REGISTER_NODE(ValveDetectionNode) diff --git a/src/valve_detector.cpp b/src/valve_detector.cpp new file mode 100644 index 0000000..3ad858a --- /dev/null +++ b/src/valve_detector.cpp @@ -0,0 +1,290 @@ +#include "valve_detection/valve_detector.hpp" + +namespace valve_detection { + +ValveDetector::ValveDetector(int yolo_img_width_, + int yolo_img_height_, + float annulus_radius_ratio, + float plane_ransac_threshold, + int plane_ransac_max_iterations, + float valve_handle_offset) + : yolo_img_width_(yolo_img_width_), + yolo_img_height_(yolo_img_height_), + annulus_radius_ratio_(annulus_radius_ratio), + plane_ransac_threshold_(plane_ransac_threshold), + plane_ransac_max_iterations_(plane_ransac_max_iterations), + valve_handle_offset_(valve_handle_offset) {} + +cv::Mat ValveDetector::draw_detections(const cv::Mat& image, + const std::vector& boxes, + const std::vector& poses) const { + cv::Mat visualized_image = image.clone(); + + cv::Mat camera_matrix = + (cv::Mat_(3, 3) << color_image_properties_.intr.fx, 0, + color_image_properties_.intr.cx, 0, color_image_properties_.intr.fy, + color_image_properties_.intr.cy, 0, 0, 1); + cv::Mat dist_coeffs = cv::Mat::zeros(4, 1, 
CV_64F); + + for (size_t i = 0; i < boxes.size(); ++i) { + const auto& box = boxes[i]; + + int x1 = box.center_x - box.size_x / 2; + int y1 = box.center_y - box.size_y / 2; + int x2 = box.center_x + box.size_x / 2; + int y2 = box.center_y + box.size_y / 2; + cv::rectangle(visualized_image, cv::Point(x1, y1), cv::Point(x2, y2), + cv::Scalar(0, 255, 0), 2); + + std::ostringstream label; + label << "id:" << i; + if (!std::isnan(box.theta)) { + label << "Valve θ=" << std::fixed << std::setprecision(2) + << (box.theta * 180.0 / M_PI) << "°"; + cv::putText(visualized_image, label.str(), cv::Point(x1, y1 - 5), + cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), + 1); + } + // Draw pose axes only if valid pose exists + if (i < poses.size()) { + const Pose& pose = poses[i]; + if (!std::isnan(pose.position[0]) && + !std::isnan(pose.position[1]) && + !std::isnan(pose.position[2]) && !std::isnan(box.theta)) { + cv::Mat tvec = (cv::Mat_(3, 1) << pose.position[0], + pose.position[1], pose.position[2]); + + Eigen::Matrix3f rotation_matrix = + pose.orientation.toRotationMatrix(); + cv::Mat rmat = (cv::Mat_(3, 3) << rotation_matrix(0, 0), + rotation_matrix(0, 1), rotation_matrix(0, 2), + rotation_matrix(1, 0), rotation_matrix(1, 1), + rotation_matrix(1, 2), rotation_matrix(2, 0), + rotation_matrix(2, 1), rotation_matrix(2, 2)); + cv::Mat rvec; + cv::Rodrigues(rmat, rvec); + + cv::drawFrameAxes(visualized_image, camera_matrix, dist_coeffs, + rvec, tvec, 0.1); + } + } + } + return visualized_image; +} + +void ValveDetector::rmat_to_quat(const Eigen::Matrix3f& rotation_matrix, + Eigen::Quaternionf& quat) const { + quat = Eigen::Quaternionf(rotation_matrix); + quat.normalize(); +} + +void ValveDetector::project_point_to_pixel(float x, + float y, + float z, + int& u, + int& v) const { + if (z <= 0) { + u = v = -1; // Invalid pixel coordinates + return; + } + + double fx = color_image_properties_.intr.fx; + double fy = color_image_properties_.intr.fy; + double cx = 
color_image_properties_.intr.cx; + double cy = color_image_properties_.intr.cy; + + u = static_cast((x * fx / z) + cx); + v = static_cast((y * fy / z) + cy); +} + +void ValveDetector::calculate_letterbox_padding() { + int org_image_width = color_image_properties_.dim.width; + int org_image_height = color_image_properties_.dim.height; + + letterbox_scale_factor_ = + std::min(static_cast(yolo_img_width_) / org_image_width, + static_cast(yolo_img_height_) / org_image_height); + + double resized_width = org_image_width * letterbox_scale_factor_; + double resized_height = org_image_height * letterbox_scale_factor_; + + letterbox_pad_x_ = (yolo_img_width_ - resized_width) / 2.0; + letterbox_pad_y_ = (yolo_img_height_ - resized_height) / 2.0; +} + +BoundingBox ValveDetector::transform_bounding_box( + const BoundingBox& bbox) const { + BoundingBox transformed_bbox = bbox; + + transformed_bbox.center_x = + (bbox.center_x - letterbox_pad_x_) / letterbox_scale_factor_; + transformed_bbox.center_y = + (bbox.center_y - letterbox_pad_y_) / letterbox_scale_factor_; + transformed_bbox.size_x /= letterbox_scale_factor_; + transformed_bbox.size_y /= letterbox_scale_factor_; + + return transformed_bbox; +} + +bool ValveDetector::segment_plane( + const pcl::PointCloud::Ptr& cloud, + pcl::ModelCoefficients::Ptr& coefficients, + pcl::PointIndices::Ptr& inliers) const { + if (cloud->points.empty()) { + return false; + } + + pcl::SACSegmentation seg; + seg.setOptimizeCoefficients(true); + seg.setModelType(pcl::SACMODEL_PLANE); + seg.setMethodType(pcl::SAC_RANSAC); + seg.setDistanceThreshold(plane_ransac_threshold_); + seg.setMaxIterations(plane_ransac_max_iterations_); + seg.setInputCloud(cloud); + seg.segment(*inliers, *coefficients); + + return !inliers->indices.empty(); +} + +Eigen::Vector3f ValveDetector::get_ray_direction( + const BoundingBox& bbox) const { + float u = bbox.center_x; + float v = bbox.center_y; + + float fx = color_image_properties_.intr.fx; + float fy = 
color_image_properties_.intr.fy; + float cx = color_image_properties_.intr.cx; + float cy = color_image_properties_.intr.cy; + + float xc = (u - cx) / fx; + float yc = (v - cy) / fy; + + Eigen::Vector3f ray_direction; + ray_direction << xc, yc, 1.0f; + + return ray_direction.normalized(); +} + +Eigen::Vector3f ValveDetector::compute_plane_normal( + const pcl::ModelCoefficients::Ptr& coefficients, + const Eigen::Vector3f& ray_direction) const { + Eigen::Vector3f normal_vector; + + if (!coefficients || coefficients->values.size() < 3) { + return Eigen::Vector3f::Zero(); + } + + normal_vector << coefficients->values[0], coefficients->values[1], + coefficients->values[2]; + + normal_vector.normalize(); + + if (normal_vector.dot(ray_direction) > 0.0) { + normal_vector = -normal_vector; + } + + return normal_vector; +} + +Eigen::Vector3f ValveDetector::find_ray_plane_intersection( + const pcl::ModelCoefficients::Ptr& coefficients, + const Eigen::Vector3f& ray_direction) const { + if (!coefficients || coefficients->values.size() < 4) { + return Eigen::Vector3f::Zero(); + } + + Eigen::Vector3f plane_normal; + plane_normal << coefficients->values[0], coefficients->values[1], + coefficients->values[2]; + float D = coefficients->values[3]; + + float normal_dot_ray = plane_normal.dot(ray_direction); + + if (std::abs(normal_dot_ray) < 1e-6) { + // The ray is parallel or contained within the plane. 
+ return Eigen::Vector3f::Zero(); + } + + float t = -D / normal_dot_ray; + + Eigen::Vector3f intersection_point = t * ray_direction; + + return intersection_point; +} + +Eigen::Vector3f ValveDetector::shift_point_along_normal( + const Eigen::Vector3f& intersection_point, + const Eigen::Vector3f& plane_normal) const { + return intersection_point + (plane_normal * valve_handle_offset_); +} + +Eigen::Matrix3f ValveDetector::create_rotation_matrix( + const pcl::ModelCoefficients::Ptr& coefficients, + const Eigen::Vector3f& plane_normal, + float angle) { + if (!coefficients || coefficients->values.size() < 4) { + return Eigen::Matrix3f::Identity(); + } + + Eigen::Vector3f z_axis = plane_normal; + float D = coefficients->values[3]; + + float fx = color_image_properties_.intr.fx; + float fy = color_image_properties_.intr.fy; + float cx = color_image_properties_.intr.cx; + float cy = color_image_properties_.intr.cy; + + Eigen::Matrix3f K; + K << fx, 0, cx, 0, fy, cy, 0, 0, 1; + Eigen::Matrix3f Kinv = K.inverse(); + + // Define two image points along the given angle (through principal point) + // --- + float len = 50.0f; // arbitrary pixel length (only direction matters) + Eigen::Vector3f p1(cx, cy, 1.f); + Eigen::Vector3f p2(cx + len * std::cos(angle), cy + len * std::sin(angle), + 1.f); + + // Back project points to rays + Eigen::Vector3f r1 = (Kinv * p1).normalized(); + Eigen::Vector3f r2 = (Kinv * p2).normalized(); + + // Compute intersections of rays with the plane + float denom1 = z_axis.dot(r1); + float denom2 = z_axis.dot(r2); + + if (std::abs(denom1) < 1e-6f || std::abs(denom2) < 1e-6f) { + // rays parallel to plane — return identity fallback + return Eigen::Matrix3f::Identity(); + } + + float t1 = -D / denom1; + float t2 = -D / denom2; + + Eigen::Vector3f X1 = t1 * r1; + Eigen::Vector3f X2 = t2 * r2; + + // Compute in-plane direction corresponding to the image line angle + Eigen::Vector3f x_axis = (X2 - X1).normalized(); + + // Project onto the plane (for 
numerical stability) + x_axis = (x_axis - x_axis.dot(z_axis) * z_axis).normalized(); + + // Ensure consistent direction (optional: avoid flipping frame between + // frames) + if (filter_direction_.dot(x_axis) < 0) + x_axis = -x_axis; + filter_direction_ = x_axis; + + Eigen::Vector3f y_axis = z_axis.cross(x_axis).normalized(); + x_axis = y_axis.cross(z_axis).normalized(); + + Eigen::Matrix3f rotation_matrix; + rotation_matrix.col(0) = x_axis; // X_obj: direction of the image line + rotation_matrix.col(1) = y_axis; // Y_obj: perpendicular in-plane + rotation_matrix.col(2) = z_axis; // Z_obj: plane normal + + return rotation_matrix; +} + +} // namespace valve_detection diff --git a/src/valve_pose_ros.cpp b/src/valve_pose_ros.cpp new file mode 100644 index 0000000..8a07168 --- /dev/null +++ b/src/valve_pose_ros.cpp @@ -0,0 +1,300 @@ +#include "valve_detection_ros/valve_pose_ros.hpp" + +namespace valve_detection { + +using namespace std::placeholders; + +ValvePoseNode::ValvePoseNode(const rclcpp::NodeOptions& options) + : Node("valve_detection_node", options) { + init_valve_detector(); + + setup_sync(); + + if (calculate_angle_) { + init_angle_detector(); + } + + std::string color_image_info_topic = + this->declare_parameter("color_image_info_topic"); + + rclcpp::QoS qos = rclcpp::QoS(rclcpp::KeepLast(1)) + .reliability(RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT); + + color_image_info_sub_ = + this->create_subscription( + color_image_info_topic, qos, + std::bind(&ValvePoseNode::color_image_info_callback, this, + std::placeholders::_1)); + + std::string valve_poses_pub_topic = + this->declare_parameter("valve_poses_pub_topic"); + + valve_poses_pub_ = this->create_publisher( + valve_poses_pub_topic, qos); + + debug_visualize_ = this->declare_parameter("debug_visualize"); + + if (debug_visualize_) { + std::string annulus_pub_topic = + this->declare_parameter("annulus_pub_topic"); + annulus_pcl_pub_ = + this->create_publisher( + annulus_pub_topic, qos); + std::string 
plane_pub_topic = + this->declare_parameter("plane_pub_topic"); + annulus_plane_pub_ = + this->create_publisher( + plane_pub_topic, qos); + angle_image_pub_ = this->create_publisher( + this->declare_parameter( + "angle_detection_image_pub_topic"), + qos); + } + if (visualize_detections_) { + std::string processed_image_pub_topic = + this->declare_parameter("processed_image_pub_topic"); + processed_image_pub_ = this->create_publisher( + processed_image_pub_topic, qos); + } + if (!use_depth_image_) { + tf_buffer_ = std::make_shared(this->get_clock()); + tf_listener_ = + std::make_shared(*tf_buffer_); + } +} + +void ValvePoseNode::init_valve_detector() { + int yolo_img_width = this->declare_parameter("yolo_img_width"); + int yolo_img_height = this->declare_parameter("yolo_img_height"); + + float annulus_radius_ratio = + this->declare_parameter("annulus_radius_ratio"); + + float plane_ransac_threshold = + this->declare_parameter("plane_ransac_threshold"); + int plane_ransac_max_iterations = + this->declare_parameter("plane_ransac_max_iterations"); + + float valve_handle_offset = + this->declare_parameter("valve_handle_offset"); + + valve_detector_ = std::make_unique( + yolo_img_width, yolo_img_height, annulus_radius_ratio, + plane_ransac_threshold, plane_ransac_max_iterations, + valve_handle_offset); +} + +void ValvePoseNode::setup_sync() { + visualize_detections_ = + this->declare_parameter("visualize_detections"); + calculate_angle_ = this->declare_parameter("calculate_angle"); + use_depth_image_ = this->declare_parameter("use_depth_image"); + + use_color_image_ = (calculate_angle_ || visualize_detections_); + + std::string color_image_sub_topic = + this->declare_parameter("color_image_sub_topic"); + std::string depth_image_sub_topic = + this->declare_parameter("depth_image_sub_topic"); + std::string pcl_sub_topic = + this->declare_parameter("pcl_sub_topic"); + std::string detections_sub_topic = + this->declare_parameter("detections_sub_topic"); + + rclcpp::QoS qos = 
rclcpp::QoS(rclcpp::KeepLast(10)) + .reliability(RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT); + + if (use_color_image_ && use_depth_image_) { + color_image_sub_.subscribe(this, color_image_sub_topic, + qos.get_rmw_qos_profile()); + depth_image_sub_.subscribe(this, depth_image_sub_topic, + qos.get_rmw_qos_profile()); + detections_sub_.subscribe(this, detections_sub_topic, + qos.get_rmw_qos_profile()); + sync_di_ci_d_ = + std::make_shared>( + SyncPolicy_DI_CI_D(10), depth_image_sub_, color_image_sub_, + detections_sub_); + sync_di_ci_d_->registerCallback( + std::bind(&ValvePoseNode::di_ci_d_callback, this, _1, _2, _3)); + + } else if (use_depth_image_) { + depth_image_sub_.subscribe(this, depth_image_sub_topic, + qos.get_rmw_qos_profile()); + detections_sub_.subscribe(this, detections_sub_topic, + qos.get_rmw_qos_profile()); + sync_di_d_ = + std::make_shared>( + SyncPolicy_DI_D(10), depth_image_sub_, detections_sub_); + sync_di_d_->registerCallback( + std::bind(&ValvePoseNode::di_d_callback, this, _1, _2)); + + } else if (use_color_image_ && !use_depth_image_) { + pcl_sub_.subscribe(this, pcl_sub_topic, qos.get_rmw_qos_profile()); + color_image_sub_.subscribe(this, color_image_sub_topic, + qos.get_rmw_qos_profile()); + detections_sub_.subscribe(this, detections_sub_topic, + qos.get_rmw_qos_profile()); + sync_pc_ci_d_ = + std::make_shared>( + SyncPolicy_PC_CI_D(10), pcl_sub_, color_image_sub_, + detections_sub_); + sync_pc_ci_d_->registerCallback( + std::bind(&ValvePoseNode::pc_ci_d_callback, this, _1, _2, _3)); + + } else { + pcl_sub_.subscribe(this, pcl_sub_topic, qos.get_rmw_qos_profile()); + detections_sub_.subscribe(this, detections_sub_topic, + qos.get_rmw_qos_profile()); + sync_pc_d_ = + std::make_shared>( + SyncPolicy_PC_D(10), pcl_sub_, detections_sub_); + sync_pc_d_->registerCallback( + std::bind(&ValvePoseNode::pc_d_callback, this, _1, _2)); + } +} + +void ValvePoseNode::di_ci_d_callback( + const sensor_msgs::msg::Image::ConstSharedPtr& depth_image, + const 
sensor_msgs::msg::Image::ConstSharedPtr& color_image, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections) { + synchronized_callback(depth_image, color_image, + detections); +} + +void ValvePoseNode::di_d_callback( + const sensor_msgs::msg::Image::ConstSharedPtr& depth_image, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections) { + synchronized_callback(depth_image, nullptr, + detections); +} + +void ValvePoseNode::pc_ci_d_callback( + const sensor_msgs::msg::PointCloud2::ConstSharedPtr& pcl, + const sensor_msgs::msg::Image::ConstSharedPtr& color_image, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections) { + synchronized_callback(pcl, color_image, + detections); +} + +void ValvePoseNode::pc_d_callback( + const sensor_msgs::msg::PointCloud2::ConstSharedPtr& pcl, + const vision_msgs::msg::Detection2DArray::ConstSharedPtr& detections) { + synchronized_callback(pcl, nullptr, + detections); +} + +void ValvePoseNode::init_angle_detector() { + if (calculate_angle_) { + AngleDetectorParams angle_params; + angle_params.line_detection_area = + this->declare_parameter("angle.detection_area_ratio"); + angle_params.canny_low_threshold = + this->declare_parameter("angle.canny_low_threshold"); + angle_params.canny_high_threshold = + this->declare_parameter("angle.canny_high_threshold"); + angle_params.canny_aperture_size = + this->declare_parameter("angle.canny_aperture_size"); + angle_params.hough_rho_res = + this->declare_parameter("angle.hough_rho_res"); + angle_params.hough_theta_res = + this->declare_parameter("angle.hough_theta_res"); + angle_params.hough_threshold = + this->declare_parameter("angle.hough_threshold"); + angle_params.hough_min_line_length = + this->declare_parameter("angle.hough_min_line_length"); + angle_params.hough_max_line_gap = + this->declare_parameter("angle.hough_max_line_gap"); + + angle_detector_ = std::make_unique(angle_params); + } +} + +void ValvePoseNode::color_image_info_callback( + 
const sensor_msgs::msg::CameraInfo::SharedPtr camera_info_msg) { + if (!color_image_info_received_) { + ImageProperties img_props; + img_props.intr.fx = camera_info_msg->k[0]; + img_props.intr.fy = camera_info_msg->k[4]; + img_props.intr.cx = camera_info_msg->k[2]; + img_props.intr.cy = camera_info_msg->k[5]; + img_props.dim.width = camera_info_msg->width; + img_props.dim.height = camera_info_msg->height; + + valve_detector_->set_color_image_properties(img_props); + valve_detector_->calculate_letterbox_padding(); + + color_image_frame_id_ = camera_info_msg->header.frame_id; + color_image_info_received_ = true; + color_image_info_sub_.reset(); + } +} + +BoundingBox ValvePoseNode::to_bounding_box( + const vision_msgs::msg::BoundingBox2D& bbox) const { + BoundingBox box; + box.center_x = bbox.center.position.x; + box.center_y = bbox.center.position.y; + box.size_x = bbox.size_x; + box.size_y = bbox.size_y; + box.theta = bbox.center.theta; + return box; +} + +void ValvePoseNode::publish_annulus_pcl( + const pcl::PointCloud::Ptr& cloud, + const std_msgs::msg::Header& header) const { + sensor_msgs::msg::PointCloud2 cloud_msg; + pcl::toROSMsg(*cloud, cloud_msg); + cloud_msg.header = header; + cloud_msg.is_dense = false; + cloud_msg.width = static_cast(cloud->points.size()); + cloud_msg.height = 1; + + annulus_pcl_pub_->publish(cloud_msg); +} + +void ValvePoseNode::publish_annulus_plane( + const pcl::PointCloud::Ptr& cloud, + const std_msgs::msg::Header& header) const { + sensor_msgs::msg::PointCloud2 cloud_msg; + pcl::toROSMsg(*cloud, cloud_msg); + cloud_msg.header = header; + cloud_msg.is_dense = false; + cloud_msg.width = static_cast(cloud->points.size()); + cloud_msg.height = 1; + + annulus_plane_pub_->publish(cloud_msg); +} + +void ValvePoseNode::publish_valve_poses( + const std::vector& poses, + const std_msgs::msg::Header& header) const { + geometry_msgs::msg::PoseArray pose_array_msg; + pose_array_msg.header = header; + + for (const auto& pose : poses) { + if 
(!std::isnan(pose.orientation.x()) && + !std::isnan(pose.orientation.y()) && + !std::isnan(pose.orientation.z()) && + !std::isnan(pose.orientation.w()) && + !std::isnan(pose.position.x()) && !std::isnan(pose.position.y()) && + !std::isnan(pose.position.z())) { + geometry_msgs::msg::Pose pose_msg; + pose_msg.position.x = pose.position.x(); + pose_msg.position.y = pose.position.y(); + pose_msg.position.z = pose.position.z(); + pose_msg.orientation.x = pose.orientation.x(); + pose_msg.orientation.y = pose.orientation.y(); + pose_msg.orientation.z = pose.orientation.z(); + pose_msg.orientation.w = pose.orientation.w(); + + pose_array_msg.poses.push_back(pose_msg); + } + } + valve_poses_pub_->publish(pose_array_msg); +} + +} // namespace valve_detection + +RCLCPP_COMPONENTS_REGISTER_NODE(valve_detection::ValvePoseNode)