diff --git a/src/asterix/param/asterix.in.yml b/src/asterix/param/asterix.in.yml index 81175d37..c9f09644 100644 --- a/src/asterix/param/asterix.in.yml +++ b/src/asterix/param/asterix.in.yml @@ -49,6 +49,14 @@ asterix: strategy_mode: NORMAL + localisation_node: + ros__parameters: + vlx_lat_x: 80.0 + vlx_lat_y: 130.5 + vlx_face_x: 100.5 + vlx_face_y: 110.0 + + lcd_driver: ros__parameters: use_sim_time: !Var use_sim_time diff --git a/src/modules/localisation/README.md b/src/modules/localisation/README.md new file mode 100644 index 00000000..5ef6f989 --- /dev/null +++ b/src/modules/localisation/README.md @@ -0,0 +1,171 @@ +# Localisation package + +## What does this package do? + +- Publish the initial map->odom transformation, +- Subscribe to the "odom" topic and publish the map-relative robot pose on "odom_map_relative", +- Compute the transformation to re-adjust odometry and publish it. + + +## Odometry re-adjustment method + +In order to re-adjust the odometry, two things are used: +- Vision from the assurancetourix package, +- 6 VL53L1X sensors around the robot. + +### Vision + +Assurancetourix publishes aruco marker positions and orientations on the /allies_positions_markers topic. By subscribing to this topic and identifying the marker that corresponds to the node's namespace, the position and orientation of the robot can be found. + +### VL53L1X + +There are 6 VL53L1X sensors on the robot. Knowing their positions relative to the robot and the distances they return, with a certain amount of mathematical work (described below), the position and orientation of the robot can be found. + +The limitation of this algorithm is that **an estimate of the position and orientation of the robot is required to validate and refine the pose (position/orientation).** + +### Problem raised by a mix of vision and VLX + +We estimate the vision localization precision at about 1 centimeter on average, which isn't enough in some cases (particularly near walls). + +So, in order to increase the precision, the information given by the vision needs to be refined by a vlx measurement. But keep in mind that vision provides a stamped pose, which means the marker given by assurancetourix arrives with a delay of a few hundred milliseconds (~350ms on average). The vlx measurement takes about 60ms (in simulation), which gives a final difference between these stamps of about 400ms. + +If the robot is moving at something like 20cm/s, we would have to allow a difference of 8cm between the vision pose and the vlx computed pose. That is unrealistic: a goblet is only 7cm in diameter. + +In short: **the difference of stamps between the vlx measurement and the vision measurement must be as small as possible.** + +### Another approach + +We know that every 750ms we try to re-adjust the odometry by allowing the node to receive a marker message. A routine can therefore wait for 0.65s, then launch a loop that takes vlx measurements, stamps each sample, and stores them in a queue of about 10 samples (a small delay is added between samples to avoid overloading the localisation_node thread). The oldest element of this queue is 600ms old, so it's easy to retrieve the element of the queue nearest to the vision marker stamp. When this element is retrieved, the vlx sampling thread is stopped. + +With this nearest queue element, we have a maximum difference of stamps of 0.04s (40ms), which is 10 times better than the previous approach. We're going to consider that enough of an optimization.
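Below is a minimal sketch of this sampling queue in plain Python, to make the timing idea concrete. It is illustrative only: the names here (`StampedSampleQueue`, `get_distances`, wall-clock stamps) are assumptions of this sketch, while the real implementation lives in `vlx_readjustment.py` (`VlxStamped`, `values_stamped_array`, `continuous_vlx_sampling`).

```python
import time
import threading
from collections import deque


class StampedSampleQueue:
    """Keep the ~10 most recent stamped vlx samples in a background thread."""

    def __init__(self, get_distances, maxlen=10, period=0.06):
        self._get_distances = get_distances  # callable returning {addr: distance_mm}
        self._samples = deque(maxlen=maxlen)  # (stamp_sec, distances) pairs
        self._period = period
        self._running = False
        self._thread = None

    def start(self, initial_delay=0.65):
        """Start sampling after the initial delay (the 0.65s mentioned above)."""
        if self._thread is not None:
            raise RuntimeError("sampling thread already running")
        self._running = True
        self._thread = threading.Thread(target=self._loop, args=(initial_delay,))
        self._thread.start()

    def stop(self):
        self._running = False
        if self._thread is not None:
            self._thread.join()
            self._thread = None

    def _loop(self, initial_delay):
        time.sleep(initial_delay)
        while self._running:
            self._samples.appendleft((time.time(), self._get_distances()))
            time.sleep(self._period)  # small delay so the node can do its other work

    def nearest(self, stamp_sec, tolerance=0.06):
        """Return the sample whose stamp is nearest to the vision marker stamp,
        or None if no sample falls within the tolerance (the node uses a 0.06s
        window). Call this after stop(), or add a lock, to avoid iterating the
        deque while the sampling thread mutates it."""
        best = min(self._samples, key=lambda s: abs(s[0] - stamp_sec), default=None)
        if best is None or abs(best[0] - stamp_sec) > tolerance:
            return None
        return best
```

Once the nearest sample is found, `stop()` is called so the pose computation runs without the sampling thread competing for the sensors.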
+ +We now just have to compare the vlx computed pose and the vision pose of the robot: if they are close enough (< 5cm, < 0.1rad) and the estimated projections of the vlx beams on the walls actually hit the expected wall, the correction is considered valid. The valid transformation is sent to drive for it to re-adjust odometry. If the vlx computed pose isn't considered valid, the vision pose is sent to drive instead. + +After sending the correction transformation to drive, we restart the vlx sampling thread with a starting delay of 0.65s. + +### Near walls routine + +There's a specific behavior of this package near walls: in the odom callback, when the robot position is detected to be near a wall (<30cm), the node short-circuits the behavior presented above and adopts a new strategy. + +This strategy runs the vlx sampling thread "indefinitely", with no starting delay. Every 0.5 seconds, in the odom callback, the node asks for an odometry re-adjustment based on the last robot pose sent to drive. + +As before, stamps are compared to find the nearest element of the queue, which gives the sample to use for the calculation. While the new pose is being computed from the vlx distance values, the thread is stopped to avoid overloading the localisation_node thread. + +If the computed pose is considered valid, it's sent to drive to re-adjust odometry. Otherwise the pose is simply thrown away, and nothing is sent to re-adjust odometry. + +Then, the vlx sampling thread is restarted with no delay, because the node can keep trying to re-adjust odometry in the vision callback with the strategy described in the previous paragraph. + +### Avoiding concurrency problems + +As you will have gathered, the localisation node runs several threads (odom callback, marker callback and vlx sampling). In order to avoid concurrency problems (deadlocks, multiple thread starts, overloading, etc.), several strategies are deployed: +- **Deadlocks:** A resource shared by several threads is writable from only one thread; in the other threads it is read-only. +- **Multiple thread starts:** Only one vlx sampling thread can exist. When asked to create this thread, the node first checks whether it already exists, and reports that it is already running instead of creating a second one. The same goes for stopping: if no vlx sampling thread exists, the node reports that the thread doesn't exist. +- **Overloading:** Getting vlx values is a blocking call, so a small non-blocking delay is needed between samples to let the node do its other work. + +## Mathematical considerations + +This section explains how the pose (position/orientation) of the robot can be computed from the vlx measured distances. + +As with any geometry problem, it's much easier to understand what's happening with a schematic of the situation: +
+![vlx math schematic](schematic_pictures/vlx_math_schematic.png)
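For reference, reading `get_pose_from_vlx` in `vlx_readjustment.py`, the schematic's symbols appear to correspond to the node's ROS parameters as follows. This mapping is our inference (it is not stated explicitly in the package), with the millimetre values taken from the asterix configuration at the top of this diff:

```python
# Inferred symbol-to-parameter mapping (assumption; millimetres, asterix values):
e = 110.0  # vlx_face_y: half the spacing between the two facing vlx
d = 100.5  # vlx_face_x: offset of the facing vlx pair from the robot centre
L = 80.0   # vlx_lat_x: offset of the lateral vlx along the robot's side
r = 130.5  # vlx_lat_y: offset of the lateral vlx from the robot centre
```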
+ +Now, time to deep dive into the calculations: + ++ ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}f(d1,d2,d3)=(x,y,\theta)) + + ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\theta):\ +Considering the triangle outlined in purple and basic trigonometry:\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\tan(\theta)=\frac{d2-d1}{2e})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\Rightarrow\boxed{\theta=\arctan{\frac{d2-d1}{2e}}}%20(3)%20) + + + ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}x) and ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}y):\ +Now that we know ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\theta), it's fairly simple to get x and y by projecting the vlx distances using it. +
+![vlx x and y determination](schematic_pictures/vlx_x_y_determination.png)
+ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{y=%20(%20\%20\frac{d1%2Bd2}{2}%20%2B%20d%20\%20).\cos{\theta}}%20\%20(1))\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{x=(d3%2Br).\cos{\theta}%2BL.\sin{\theta}}\%20(2)) + ++ ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}f(x,y,\theta)=(d1,d2,d3)) + + ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}d1)\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}(3)\Rightarrow\tan{\theta}=\frac{d2-d1}{2e})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\Rightarrow%20d2=2e.\tan{\theta}%2Bd1%20(4)%20)\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}(1)\Rightarrow%20y=(\frac{d1%2B(d1%2B2e.\tan{\theta})}{2}%2Bd).\cos{\theta})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\Rightarrow%20y=(d1%2Be.\tan{\theta}%2Bd).\cos{\theta})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\Rightarrow\boxed{d1=\frac{y}{\cos{\theta}}-e.\tan{\theta}-d}) + + + ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}d2)\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}(4)\Rightarrow%20d2=2e.\tan{\theta}%2B\frac{y}{\cos{\theta}}-e.\tan{\theta}-d)\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\Rightarrow\boxed{d2=\frac{y}{\cos{\theta}}%2Be.\tan{\theta}-d}) + + + ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}d3)\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}(2)\Rightarrow%20x-L.\sin{\theta}=(d3%2Br).\cos{\theta})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\Rightarrow\frac{x}{\cos{\theta}}-L.\tan{\theta}=d3%2Br)\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\Rightarrow\boxed{d3=\frac{x}{\cos{\theta}}-L.\tan{\theta}-r}) + ++ ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}f(x,y,\theta,d1,d2,d3)=(d1\_proj,d2\_proj,d3\_proj)) +
+![vlx wall projections](schematic_pictures/vlx_proj.png)
+ +All these are simple projections knowing ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}x,y,\theta) and ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}f(x,y,\theta)=(d1,d2,d3)). They constitute an extra tool to validate the pose estimation. Indeed, if a projection is negative, the vlx isn't measuring the distance to the desired wall, so the resulting pose will obviously be incorrect. + +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d1\_proj=x%2B(d1%2Bd).\sin{\theta}-e.\cos{\theta}})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d2\_proj=x%2B(d2%2Bd).\sin{\theta}%2Be.\cos{\theta}})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d3\_proj=y-(d3%2Br).\sin{\theta}-L.\cos{\theta}}) + +### Formulas summary +\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{x=(d3%2Br).\cos{\theta}%2BL.\sin{\theta}}\%20)\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{y=%20(%20\%20\frac{d1%2Bd2}{2}%20%2B%20d%20\%20).\cos{\theta}})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{\theta=\arctan{\frac{d2-d1}{2e}}})\ +\ +\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d1=\frac{y}{\cos{\theta}}-e.\tan{\theta}-d})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d2=\frac{y}{\cos{\theta}}%2Be.\tan{\theta}-d})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d3=\frac{x}{\cos{\theta}}-L.\tan{\theta}-r})\ +\ +\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d1\_proj=x%2B(d1%2Bd).\sin{\theta}-e.\cos{\theta}})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d2\_proj=x%2B(d2%2Bd).\sin{\theta}%2Be.\cos{\theta}})\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\boxed{d3\_proj=y-(d3%2Br).\sin{\theta}-L.\cos{\theta}}) + +## In practice + +Now that we've established the theory, it's time to enumerate all the cases. The table below summarizes the different cases implemented in the algorithm; it also accounts for some tweaks in the algorithm (such as inverting the angle in some cases due to symmetries, etc.). A code sketch of the summary formulas follows the variable definitions. + +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}x) is the distance between the robot and its sideways wall.\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}y) is the distance between the robot and its facing wall.\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\theta) is the orientation of the robot relative to the wall, as described in the first schematic.\ +![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi) is the orientation of the robot relative to the map.
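Before the case table, here is a direct Python transcription of the three boxed formula groups. This is a sketch: the function names are ours, and `e`, `d`, `L`, `r` follow the parameter mapping inferred earlier (`vlx_face_y`, `vlx_face_x`, `vlx_lat_x`, `vlx_lat_y`, all in millimetres).

```python
import numpy as np


def pose_from_vlx(d1, d2, d3, e, d, L, r):
    """Wall-relative pose (x, y, theta) from the three vlx distances."""
    theta = np.arctan((d2 - d1) / (2 * e))
    x = (d3 + r) * np.cos(theta) + L * np.sin(theta)
    y = ((d1 + d2) / 2 + d) * np.cos(theta)
    return x, y, theta


def vlx_from_pose(x, y, theta, e, d, L, r):
    """Expected vlx distances for a given wall-relative pose."""
    d1 = y / np.cos(theta) - e * np.tan(theta) - d
    d2 = y / np.cos(theta) + e * np.tan(theta) - d
    d3 = x / np.cos(theta) - L * np.tan(theta) - r
    return d1, d2, d3


def proj_on_wall(x, y, theta, d1, d2, d3, e, d, L, r):
    """Where each vlx beam hits the wall; a negative value means the beam
    misses the expected wall, so the computed pose must be rejected."""
    d1_proj = x + (d1 + d) * np.sin(theta) - e * np.cos(theta)
    d2_proj = x + (d2 + d) * np.sin(theta) + e * np.cos(theta)
    d3_proj = y - (d3 + r) * np.sin(theta) - L * np.cos(theta)
    return d1_proj, d2_proj, d3_proj
```

Note that `pose_from_vlx` and `vlx_from_pose` are mutual inverses, which is easy to verify by substitution. The table below then tells, for each area and heading, which physical sensor plays the role of d1/d2/d3 and how the wall-relative x, y, θ map back to map coordinates.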
+
+| Position | Orientation | d1 | d2 | d3 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\theta) | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}x) | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}y) |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+| Top blue | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[\frac{\pi}{4},\frac{3\pi}{4}])| vlx_0x31 | vlx_0x32 | vlx_0x33 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\frac{\pi}{2}-\phi) | robot_pose_x . 1000 | 2000 - robot_pose_y . 1000 |
+| Top blue | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\frac{3\pi}{4},-\frac{\pi}{4}])| vlx_0x34 | vlx_0x35 | vlx_0x30 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}-\frac{\pi}{2}-\phi) | robot_pose_x . 1000 | 2000 - robot_pose_y . 1000 |
+| Top blue | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\frac{\pi}{4},\frac{\pi}{4}])| vlx_0x34 | vlx_0x35 | vlx_0x33 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}-\phi) | robot_pose_x . 1000 | 2000 - robot_pose_y . 1000 |
+| Top blue | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\pi,-\frac{3\pi}{4}]\cup[\frac{3\pi}{4},\pi])| vlx_0x31 | vlx_0x32 | vlx_0x30 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\pi-\phi) | robot_pose_x . 1000 | 2000 - robot_pose_y . 1000 |
+| Bottom blue | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[\frac{\pi}{4},\frac{3\pi}{4}])| vlx_0x34 | vlx_0x35 | vlx_0x33 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\frac{\pi}{2}-\phi) | robot_pose_x . 1000 | robot_pose_y . 1000 |
+| Bottom blue | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\frac{3\pi}{4},-\frac{\pi}{4}])| vlx_0x31 | vlx_0x32 | vlx_0x30 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}-\frac{\pi}{2}-\phi) | robot_pose_x . 1000 | robot_pose_y . 1000 |
+| Bottom blue | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\frac{\pi}{4},\frac{\pi}{4}])| vlx_0x34 | vlx_0x35 | vlx_0x30 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}-\phi) | robot_pose_x . 1000 | robot_pose_y . 1000 |
+| Bottom blue | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\pi,-\frac{3\pi}{4}]\cup[\frac{3\pi}{4},\pi])| vlx_0x31 | vlx_0x32 | vlx_0x33 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\pi-\phi) | robot_pose_x . 1000 | robot_pose_y . 1000 |
+| Top yellow | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[\frac{\pi}{4},\frac{3\pi}{4}])| vlx_0x31 | vlx_0x32 | vlx_0x30 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\frac{\pi}{2}-\phi) | 3000 - robot_pose_x . 1000 | 2000 - robot_pose_y . 1000 |
+| Top yellow | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\frac{3\pi}{4},-\frac{\pi}{4}])| vlx_0x34 | vlx_0x35 | vlx_0x33 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}-\frac{\pi}{2}-\phi) | 3000 - robot_pose_x . 1000 | 2000 - robot_pose_y . 1000 |
+| Top yellow | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\frac{\pi}{4},\frac{\pi}{4}])| vlx_0x31 | vlx_0x32 | vlx_0x33 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}-\phi) | 3000 - robot_pose_x . 1000 | 2000 - robot_pose_y . 1000 |
+| Top yellow | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\pi,-\frac{3\pi}{4}]\cup[\frac{3\pi}{4},\pi])| vlx_0x34 | vlx_0x35 | vlx_0x30 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\pi-\phi) | 3000 - robot_pose_x . 1000 | 2000 - robot_pose_y . 1000 |
+| Bottom yellow | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[\frac{\pi}{4},\frac{3\pi}{4}])| vlx_0x34 | vlx_0x35 | vlx_0x30 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\frac{\pi}{2}-\phi) | 3000 - robot_pose_x . 1000 | robot_pose_y . 1000 |
+| Bottom yellow | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\frac{3\pi}{4},-\frac{\pi}{4}])| vlx_0x31 | vlx_0x32 | vlx_0x33 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}-\frac{\pi}{2}-\phi) | 3000 - robot_pose_x . 1000 | robot_pose_y . 1000 |
+| Bottom yellow | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\frac{\pi}{4},\frac{\pi}{4}])| vlx_0x31 | vlx_0x32 | vlx_0x30 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}-\phi) | 3000 - robot_pose_x . 1000 | robot_pose_y . 1000 |
+| Bottom yellow | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\phi\in[-\pi,-\frac{3\pi}{4}]\cup[\frac{3\pi}{4},\pi])| vlx_0x34 | vlx_0x35 | vlx_0x33 | ![formula](https://render.githubusercontent.com/render/math?math=\color{brown}\pi-\phi) | 3000 - robot_pose_x . 1000 | robot_pose_y . 
1000 | diff --git a/src/modules/localisation/localisation/localisation_node.py b/src/modules/localisation/localisation/localisation_node.py index 90174b23..526a0d68 100644 --- a/src/modules/localisation/localisation/localisation_node.py +++ b/src/modules/localisation/localisation/localisation_node.py @@ -1,54 +1,71 @@ #!/usr/bin/env python3 -"""Robot localisation node.""" +""" Robot localisation node """ import math import numpy as np - +import time +import copy import rclpy -from geometry_msgs.msg import TransformStamped + +from geometry_msgs.msg import TransformStamped, PoseStamped, Quaternion from rcl_interfaces.msg import SetParametersResult from visualization_msgs.msg import MarkerArray from tf2_ros import StaticTransformBroadcaster -from transformix_msgs.srv import TransformixParametersTransformStamped +from .vlx.vlx_readjustment import VlxReadjustment +from localisation.utils import euler_to_quaternion, is_simulation +from nav_msgs.msg import Odometry +from tf2_kdl import transform_to_kdl, do_transform_frame class Localisation(rclpy.node.Node): - """Robot localisation node.""" + """ Robot localisation node """ def __init__(self): + """ Init Localisation node """ super().__init__("localisation_node") self.robot = self.get_namespace().strip("/") self.side = self.declare_parameter("side", "blue") self.add_on_set_parameters_callback(self._on_set_parameters) - self._x, self._y, self._theta = self.fetchStartPosition() + self.robot_pose = PoseStamped() + ( + self.robot_pose.pose.position.x, + self.robot_pose.pose.position.y, + theta, + ) = self.fetchStartPosition() + self.robot_pose.pose.orientation = euler_to_quaternion(theta) self._tf_brodcaster = StaticTransformBroadcaster(self) self._tf = TransformStamped() self._tf.header.frame_id = "map" self._tf.child_frame_id = "odom" self.update_transform() - self.get_initial_tf_srv = self.create_service( - TransformixParametersTransformStamped, - "get_odom_map_tf", - self.get_initial_tf_srv_callback, - ) self.subscription_ = self.create_subscription( MarkerArray, "/allies_positions_markers", self.allies_subscription_callback, 10, ) + self.subscription_ = self.create_subscription( + Odometry, + "odom", + self.odom_callback, + 10, + ) self.tf_publisher_ = self.create_publisher( TransformStamped, "adjust_odometry", 10 ) + self.odom_map_relative_publisher_ = self.create_publisher( + PoseStamped, "odom_map_relative", 10 + ) self.last_odom_update = 0 self.get_logger().info(f"Default side is {self.side.value}") + self.vlx = VlxReadjustment(self) self.get_logger().info("Localisation node is ready") def _on_set_parameters(self, params): - """Handle Parameter events especially for side.""" + """ Handle Parameter events especially for side """ result = SetParametersResult() try: for param in params: @@ -57,15 +74,20 @@ def _on_set_parameters(self, params): self.side = param else: setattr(self, param.name, param) - self._x, self._y, self._theta = self.fetchStartPosition() + ( + self.robot_pose.pose.position.x, + self.robot_pose.pose.position.y, + theta, + ) = self.fetchStartPosition() + self.robot_pose.pose.orientation = euler_to_quaternion(theta) + self.update_transform() result.successful = True except BaseException as e: result.reason = e return result def fetchStartPosition(self): - """Fetch start position for side and robot.""" - # TODO : Calibrate the start position using VL53L1X + """ Fetch start position for side and robot """ if self.robot == "asterix": if self.side.value == "blue": return (0.29, 1.33, 0) @@ -76,76 +98,85 @@ def 
fetchStartPosition(self): return (0.29, 1.33, 0) elif self.side.value == "yellow": return (3 - 0.29, 1.33, np.pi) - # Make it crash in case of undefined parameters return None - def euler_to_quaternion(self, yaw, pitch=0, roll=0): - """Conversion between euler angles and quaternions.""" - qx = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos( - roll / 2 - ) * np.sin(pitch / 2) * np.sin(yaw / 2) - qy = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin( - roll / 2 - ) * np.cos(pitch / 2) * np.sin(yaw / 2) - qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin( - roll / 2 - ) * np.sin(pitch / 2) * np.cos(yaw / 2) - qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin( - roll / 2 - ) * np.sin(pitch / 2) * np.sin(yaw / 2) - return [qx, qy, qz, qw] - - def quaternion_to_euler(self, x, y, z, w): - """Conversion between quaternions and euler angles.""" - t0 = +2.0 * (w * x + y * z) - t1 = +1.0 - 2.0 * (x * x + y * y) - X = math.atan2(t0, t1) - t2 = +2.0 * (w * y - z * x) - t2 = +1.0 if t2 > +1.0 else t2 - t2 = -1.0 if t2 < -1.0 else t2 - Y = math.asin(t2) - t3 = +2.0 * (w * z + x * y) - t4 = +1.0 - 2.0 * (y * y + z * z) - Z = math.atan2(t3, t4) - return (X, Y, Z) - def update_transform(self): - """Update and publish transform callback.""" + """ Update and publish odom --> map transform """ self._tf.header.stamp = self.get_clock().now().to_msg() - self._tf.transform.translation.x = float(self._x) - self._tf.transform.translation.y = float(self._y) - qx, qy, qz, qw = self.euler_to_quaternion(self._theta) - self._tf.transform.rotation.x = float(qx) - self._tf.transform.rotation.y = float(qy) - self._tf.transform.rotation.z = float(qz) - self._tf.transform.rotation.w = float(qw) - self._initial_tf = self._tf + self._tf.transform.translation.x = self.robot_pose.pose.position.x + self._tf.transform.translation.y = self.robot_pose.pose.position.y + self._tf.transform.rotation = self.robot_pose.pose.orientation + self._initial_tf = copy.deepcopy(self._tf) self._tf_brodcaster.sendTransform(self._tf) def allies_subscription_callback(self, msg): - """Identity the robot marker in assurancetourix marker_array detection - publish the transformation for drive to readjust odometry""" - for allie_marker in msg.markers: + """Identify the robot marker in assurancetourix marker_array detection, + send the marker to vlx_readjustment in order to try to refine the position + at a stamp given by the marker""" + for ally_marker in msg.markers: if ( - allie_marker.text.lower() == self.robot - and (self.get_clock().now().to_msg().sec - self.last_odom_update) > 5 + ally_marker.text.lower() == self.robot + and (self.get_clock().now().to_msg().sec - self.last_odom_update) > 0.75 ): - self._x = allie_marker.pose.position.x - self._y = allie_marker.pose.position.y - q = allie_marker.pose.orientation - self._theta = self.quaternion_to_euler(q.x, q.y, q.z, q.w)[2] - self._tf.header.stamp = allie_marker.header.stamp - self._tf.transform.translation.x = allie_marker.pose.position.x - self._tf.transform.translation.y = allie_marker.pose.position.y - self._tf.transform.translation.z = float(0) - self._tf.transform.rotation = q - self.tf_publisher_.publish(self._tf) - self.last_odom_update = self.get_clock().now().to_msg().sec - - def get_initial_tf_srv_callback(self, request, response): - self.get_logger().info(f"incoming request for {self.robot} odom -> map tf") - response.transform_stamped = self._initial_tf - return response + if ( + is_simulation() + ): # simulate marker 
delay (image analysis from assurancetourix) + time.sleep(0.15) + if self.vlx.continuous_sampling == 0: + self.create_and_send_tf( + ally_marker.pose.position.x, + ally_marker.pose.position.y, + ally_marker.pose.orientation, + ally_marker.header.stamp, + ) + else: + self.vlx.try_to_readjust_with_vlx( + ally_marker.pose.position.x, + ally_marker.pose.position.y, + ally_marker.pose.orientation, + ally_marker.header.stamp, + ) + if self.vlx.continuous_sampling in [0, 1]: + self.vlx.start_continuous_sampling_thread(0.65, 1) + + def is_near_walls(self, pt): + """ Return true if the robot is less than 30cm from a wall """ + return pt.x < 0.3 or pt.y < 0.3 or pt.x > 2.7 or pt.y > 1.7 + + def create_and_send_tf(self, x, y, q, stamp): + """ Create and send tf to drive for it to re-adjust odometry """ + self._tf.header.stamp = stamp + self._tf.transform.translation.x = x + self._tf.transform.translation.y = y + self._tf.transform.translation.z = float(0) + self._tf.transform.rotation = q + self.last_odom_update = self.get_clock().now().to_msg().sec + self.tf_publisher_.publish(self._tf) + + def odom_callback(self, msg): + """Odom callback: computes the new pose of the robot relative to map, + sends this new pose on the odom_map_relative topic and starts the vlx routine + if this pose is near walls""" + robot_tf = TransformStamped() + robot_tf.transform.translation.x = msg.pose.pose.position.x + robot_tf.transform.translation.y = msg.pose.pose.position.y + robot_tf.transform.rotation = msg.pose.pose.orientation + robot_frame = transform_to_kdl(robot_tf) + new_robot_pose_kdl = do_transform_frame(robot_frame, self._initial_tf) + self.robot_pose.pose.position.x = new_robot_pose_kdl.p.x() + self.robot_pose.pose.position.y = new_robot_pose_kdl.p.y() + q = new_robot_pose_kdl.M.GetQuaternion() + self.robot_pose.header.stamp = msg.header.stamp + self.robot_pose.header.frame_id = "map" + self.robot_pose.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3]) + self.odom_map_relative_publisher_.publish(self.robot_pose) + if self.is_near_walls(self.robot_pose.pose.position): + if self.vlx.continuous_sampling == 2: + self.vlx.near_wall_routine(self.robot_pose) + else: + self.vlx.start_near_wall_routine(self.robot_pose) + elif self.vlx.continuous_sampling == 2: + self.vlx.stop_near_wall_routine() def main(args=None): @@ -156,5 +187,7 @@ def main(args=None): rclpy.spin(localisation) except KeyboardInterrupt: pass + localisation.vlx.stop_continuous_sampling_thread() + if is_simulation(): # only the simulated Sensors class owns a Webots node + localisation.vlx.sensors.node.destroy_node() localisation.destroy_node() rclpy.shutdown() diff --git a/src/modules/localisation/localisation/sensors_sim.py b/src/modules/localisation/localisation/sensors_sim.py deleted file mode 100644 index bc5e7954..00000000 --- a/src/modules/localisation/localisation/sensors_sim.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python3 - - -"""Simulated sensors definition for localisation.""" - - -class Sensors: - def __init__(self, node, addrs=[]): - """Init sensors baseclass.""" - self.node = node - self._addrs = addrs - self._vlx_array = [] - for addr in addrs: - vlx = self.node.robot.getDistanceSensor(f"vlx_0x{addr:x}") - try: - vlx.enable(100) # Sampling rate of 100ms - self._vlx_array.append(vlx) - except BaseException: - print(f"No simulated VL53L1X sensor at address {addr} !") - - def get_distances(self): - """Fetch distances from VL53L1Xs.""" - distances = {} - for vlx, addr in zip(self._vlx_array, self._addrs): - distances.update({addr: round(vlx.getValue(), 4)}) - return distances - - def __del__(self): - 
"""Destructor.""" - for vlx in self._vlx_array: - vlx.disable() diff --git a/src/modules/localisation/localisation/utils.py b/src/modules/localisation/localisation/utils.py new file mode 100644 index 00000000..b1fc4ddb --- /dev/null +++ b/src/modules/localisation/localisation/utils.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 + + +"""Some utils functions""" + +import numpy as np +import math + +from geometry_msgs.msg import Quaternion +from platform import machine + + +def euler_to_quaternion(yaw, pitch=0, roll=0): + """Conversion between euler angles and quaternions.""" + qx = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos( + roll / 2 + ) * np.sin(pitch / 2) * np.sin(yaw / 2) + qy = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin( + roll / 2 + ) * np.cos(pitch / 2) * np.sin(yaw / 2) + qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin( + roll / 2 + ) * np.sin(pitch / 2) * np.cos(yaw / 2) + qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin( + roll / 2 + ) * np.sin(pitch / 2) * np.sin(yaw / 2) + return Quaternion(x=qx, y=qy, z=qz, w=qw) + + +def quaternion_to_euler(q): + """Conversion between quaternions and euler angles. + + :param q: The quaternion to transform to euler angles + :type q: geometry_msgs.msg.Quaternion + :return: euler angles equivalent + :rtype: tuple (x,y,z) + """ + x, y, z, w = q.x, q.y, q.z, q.w + t0 = +2.0 * (w * x + y * z) + t1 = +1.0 - 2.0 * (x * x + y * y) + X = math.atan2(t0, t1) + t2 = +2.0 * (w * y - z * x) + t2 = +1.0 if t2 > +1.0 else t2 + t2 = -1.0 if t2 < -1.0 else t2 + Y = math.asin(t2) + t3 = +2.0 * (w * z + x * y) + t4 = +1.0 - 2.0 * (y * y + z * z) + Z = math.atan2(t3, t4) + return (X, Y, Z) + + +def in_rectangle(rect, pose): + """Return true if the coord are inside the rect coordinates + + :param rect: The list of rect coordinates. 
+ :type rect: array [x_min, y_min, x_max, y_max] + :param pose: the pose to test + :type pose: geometry_msgs.msg.PoseStamped + :return: true if the pose is inside the rect coordinates, else false + :rtype: bool + """ + x = pose.pose.position.x + y = pose.pose.position.y + return rect[0] < x and rect[1] < y and rect[2] > x and rect[3] > y + + +def is_simulation(): + return machine() != "aarch64" diff --git a/src/modules/localisation/localisation/vlx/__init__.py b/src/modules/localisation/localisation/vlx/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/modules/localisation/localisation/sensors.py b/src/modules/localisation/localisation/vlx/sensors.py similarity index 85% rename from src/modules/localisation/localisation/sensors.py rename to src/modules/localisation/localisation/vlx/sensors.py index 9de820e5..4c626603 100644 --- a/src/modules/localisation/localisation/sensors.py +++ b/src/modules/localisation/localisation/vlx/sensors.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 -"""Sensors definition for localisation.""" +""" Sensors definition for localisation """ from VL53L1X import VL53L1X @@ -9,7 +9,7 @@ class Sensors: def __init__(self, i2c_bus=3, addrs=[]): - """Init sensors baseclass.""" + """ Init Sensors """ self._i2c_bus = i2c_bus self._vlx_array = [] for addr in addrs: @@ -24,14 +24,14 @@ def __init__(self, i2c_bus=3, addrs=[]): ) def get_distances(self): - """Fetch distances from VL53L1Xs.""" + """ Fetch distances from VL53L1X """ distances = {} for vlx in self._vlx_array: distances.update({vlx.i2c_address: vlx.get_distance()}) return distances def __del__(self): - """Destructor.""" + """ Destructor """ for vlx in self._vlx_array: vlx.stop_ranging() vlx.close() diff --git a/src/modules/localisation/localisation/vlx/sensors_sim.py b/src/modules/localisation/localisation/vlx/sensors_sim.py new file mode 100644 index 00000000..e01e2140 --- /dev/null +++ b/src/modules/localisation/localisation/vlx/sensors_sim.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 + + +""" Simulated sensors definition for localisation """ + +from os import environ + +from builtin_interfaces.msg._time import Time +from webots_ros2_core.webots_node import WebotsNode + + +class Sensors: + """ Sensors class """ + + def __init__(self, node, addrs=[]): + """ Init Sensors """ + supervisor = node.robot + "_vlx_manager" + environ["WEBOTS_ROBOT_NAME"] = supervisor + self.node = VlxSupervisor(supervisor, args=None) + self._addrs = addrs + self._vlx_array = [] + for addr in addrs: + vlx = self.node.robot.getDevice(f"vlx_0x{addr}") + try: + vlx.enable(100) # Sampling rate of 100ms + self._vlx_array.append(vlx) + except BaseException: + print(f"No simulated VL53L1X sensor at address {addr} !") + + def get_distances(self): + """ Fetch distances from VL53L1X """ + distances = {} + for vlx, addr in zip(self._vlx_array, self._addrs): + self.node.robot.step(1) + distances.update({addr: round(vlx.getValue(), 4)}) + return distances + + def get_time_stamp(self): + """ Get time stamp from Webots """ + self.node.robot.step(0) + t = self.node.robot.getTime() + return Time(sec=int(t), nanosec=int((t - int(t)) * 1e9)) + + def __del__(self): + """ Destructor """ + for vlx in self._vlx_array: + vlx.disable() + + +class VlxSupervisor(WebotsNode): + """ Vlx manager node """ + + def __init__(self, supervisor, args=None): + """ Init VlxSupervisor """ + super().__init__(supervisor, args) diff --git a/src/modules/localisation/localisation/vlx/vlx_readjustment.py 
b/src/modules/localisation/localisation/vlx/vlx_readjustment.py new file mode 100644 index 00000000..4c4e743f --- /dev/null +++ b/src/modules/localisation/localisation/vlx/vlx_readjustment.py @@ -0,0 +1,511 @@ +#!/usr/bin/env python3 + + +""" Vlx readjustment for localisation package """ + +import numpy as np +import copy +import time +import threading + +from localisation.utils import euler_to_quaternion, quaternion_to_euler, in_rectangle, is_simulation +from geometry_msgs.msg import Pose +from builtin_interfaces.msg._time import Time + +if is_simulation(): + from .sensors_sim import Sensors +else: + from .sensors import Sensors + +bottom_blue_area = [0.0, 0.0, 0.9, 1.0] +top_blue_area = [0.0, 1.0, 1.5, 2.0] + +bottom_yellow_area = [2.1, 0.0, 3.0, 1.0] +top_yellow_area = [1.5, 1.0, 3.0, 2.0] + + +class VlxReadjustment: + """ Vlx readjustment class, contains the routines and algorithms relative to the vlx """ + + def __init__(self, parent_node): + """ Init VlxReadjustment """ + self.parent = parent_node + if is_simulation(): + self.sim_offset = 0.0215 + self.sensors = Sensors(parent_node, [30, 31, 32, 33, 34, 35]) + else: + self.sim_offset = 0.0 + self.sensors = Sensors(addrs=[30, 31, 32, 33, 34, 35]) + self.parent.declare_parameter("vlx_lat_x", 0.0) + self.parent.declare_parameter("vlx_lat_y", 0.0) + self.parent.declare_parameter("vlx_face_x", 0.0) + self.parent.declare_parameter("vlx_face_y", 0.0) + self.vlx_lat_x = self.parent.get_parameter("vlx_lat_x")._value + self.vlx_lat_y = self.parent.get_parameter("vlx_lat_y")._value + self.vlx_face_x = self.parent.get_parameter("vlx_face_x")._value + self.vlx_face_y = self.parent.get_parameter("vlx_face_y")._value + self.values_stamped_array = [ + VlxStamped( + self.sensors.get_distances(), + self.parent.get_clock().now().to_msg() + if not is_simulation() + else self.sensors.get_time_stamp(), + ) + ] + self.thread_continuous_sampling = None + self.continuous_sampling = 0 + self.near_walls_last_check_stamp = None + + def near_wall_routine(self, pose_stamped): + """Near wall routine: launches try_to_readjust_with_vlx every 0.5 seconds + considering the pose_stamped given as argument""" + if ( + abs( + float( + pose_stamped.header.stamp.sec + + pose_stamped.header.stamp.nanosec * 1e-9 + ) + - float( + self.near_walls_last_check_stamp.sec + + self.near_walls_last_check_stamp.nanosec * 1e-9 + ) + ) + < 0.5 + ): + self.try_to_readjust_with_vlx( + pose_stamped.pose.position.x, + pose_stamped.pose.position.y, + pose_stamped.pose.orientation, + pose_stamped.header.stamp, + ) + self.near_walls_last_check_stamp = ( + self.parent.get_clock().now().to_msg() + if not is_simulation() + else self.sensors.get_time_stamp() + ) + + def start_near_wall_routine(self, pose_stamped): + """Start the near wall routine by starting the thread_continuous_sampling + and launching a near_wall_routine immediately after""" + if self.near_walls_last_check_stamp is None: + self.near_walls_last_check_stamp = Time(sec=0, nanosec=0) + if self.thread_continuous_sampling is not None: + self.stop_continuous_sampling_thread() + self.start_continuous_sampling_thread(0.0, 2) + self.near_wall_routine(pose_stamped) + + def stop_near_wall_routine(self): + """ Stop the near wall routine by stopping the thread_continuous_sampling """ + if self.near_walls_last_check_stamp is not None: + self.near_walls_last_check_stamp = None + self.stop_continuous_sampling_thread() + + def start_continuous_sampling_thread(self, sleep_time, continuous_samp): + """Start the 
thread_continuous_sampling with an initial waiting time + and mode given by continuous_samp: 0->stopped, 1->nominal, 2->near_wall""" + if self.thread_continuous_sampling is None: + self.thread_continuous_sampling = threading.Thread( + target=self.continuous_vlx_sampling, + args=( + sleep_time, + continuous_samp, + ), + ) + self.thread_continuous_sampling.start() + + def stop_continuous_sampling_thread(self): + """ Safely stop the thread_continuous_sampling """ + if self.thread_continuous_sampling is not None: + self.continuous_sampling = 0 + self.thread_continuous_sampling.join() + self.thread_continuous_sampling = None + + def continuous_vlx_sampling(self, sleep_time_before_sampling, continuous_samp): + """Thread function: waits sleep_time_before_sampling seconds, then + samples continuously until continuous_sampling==0""" + self.continuous_sampling = continuous_samp + time.sleep(sleep_time_before_sampling) + while self.continuous_sampling != 0: + actual_stamp = ( + self.parent.get_clock().now().to_msg() + if not is_simulation() + else self.sensors.get_time_stamp() + ) + vlx_values = self.sensors.get_distances() + self.values_stamped_array.insert(0, VlxStamped(vlx_values, actual_stamp)) + if len(self.values_stamped_array) > 10: + self.values_stamped_array.pop() + time.sleep(0.02) + + def try_to_readjust_with_vlx(self, x, y, q, stamp): + """Retrieve the stamped vlx values nearest to the given stamp, then + send those values to the compute_data function; if the corrected pose it + returns is not None, send this corrected pose to parent.create_and_send_tf""" + send_vision_tf = self.continuous_sampling != 2 + for i in range(len(self.values_stamped_array)): + if ( + abs( + float( + self.values_stamped_array[i].stamp.sec + + self.values_stamped_array[i].stamp.nanosec * 1e-9 + ) + - float(stamp.sec + stamp.nanosec * 1e-9) + ) + < 0.06 + ): + new_stamp = self.values_stamped_array[i].stamp + self.stop_continuous_sampling_thread() + + pose = Pose() + pose.position.x = x + pose.position.y = y + pose.orientation = q + news = self.compute_data(pose, self.values_stamped_array[i].values) + if news is not None: + send_vision_tf = False + self.parent.create_and_send_tf(news[0], news[1], news[2], new_stamp) + break + if send_vision_tf: + self.parent.create_and_send_tf(x, y, q, stamp) + if self.continuous_sampling == 1: + self.stop_continuous_sampling_thread() + if self.near_walls_last_check_stamp is not None: + self.start_continuous_sampling_thread(0.0, 2) + + def compute_data(self, pose_considered, vlx_values): + """Fetch the data needed for the calculations detailed in the README, compute a + new pose from these data and estimate whether this pose is valid before returning it""" + data = self.fetch_data(pose_considered, vlx_values) + + if data is None: + return None + else: + x, y, theta = self.get_pose_from_vlx( + data["d"][0], data["d"][1], data["d"][2], data["vlx_0x30"] + ) + new_x, new_y, new_theta = ( + data["pose_est"][0](x, y), + data["pose_est"][1](x, y), + data["pose_est"][2](theta), + ) + d1_est, d2_est, d3_est = self.get_vlx_from_pose( + [data["wall_relative"][0], data["wall_relative"][1]], + data["wall_relative"][2], + data["vlx_0x30"], + ) + d1_proj_est, d2_proj_est, d3_proj_est = self.est_proj_wall( + d1_est, + d2_est, + d3_est, + data["wall_relative"][2], + [data["wall_relative"][0], data["wall_relative"][1]], + data["case"], + data["inv_angle"], + data["inv_lat"], + ) + + if ( + d1_est > 0 + and d2_est > 0 + and d3_est > 0 + and abs(new_x - pose_considered.position.x) 
< 0.05 + and abs(new_y - pose_considered.position.y) < 0.05 + and abs(new_theta - quaternion_to_euler(pose_considered.orientation)[2]) + < 0.1 + ): + return new_x, new_y, euler_to_quaternion(new_theta) + else: + return None + + def fetch_data(self, pose_considered, values): + """Returning data as described in the table at the end of README + + all other data that are needed for the pose calculations""" + angle = quaternion_to_euler(pose_considered.orientation)[2] + + if in_rectangle(bottom_blue_area, self.parent.robot_pose): + x_wall, y_wall = ( + pose_considered.position.x * 1000, + (pose_considered.position.y - self.sim_offset) * 1000, + ) + if angle > np.pi / 4 and angle < 3 * np.pi / 4: + d1, d2, d3 = values[34], values[35], values[33] + robot_pose_wall_relative = [x_wall, y_wall] + rectif_angle = np.pi / 2 - angle + x_est = lambda x, y: x / 1000 + y_est = lambda x, y: y / 1000 - self.sim_offset + theta_est = lambda t: t + np.pi / 2 + case = 1 + elif angle < -np.pi / 4 and angle > -3 * np.pi / 4: + d1, d2, d3 = values[31], values[32], values[30] + robot_pose_wall_relative = [x_wall, y_wall] + rectif_angle = -np.pi / 2 - angle + x_est = lambda x, y: x / 1000 + y_est = lambda x, y: y / 1000 - self.sim_offset + theta_est = lambda t: t - np.pi / 2 + case = 2 + elif angle > -np.pi / 4 and angle < np.pi / 4: + d1, d2, d3 = values[34], values[35], values[30] + robot_pose_wall_relative = [y_wall, x_wall] + rectif_angle = 0 - angle + x_est = lambda x, y: y / 1000 + y_est = lambda x, y: x / 1000 - self.sim_offset + theta_est = lambda t: t + case = 3 + else: + d1, d2, d3 = values[31], values[32], values[33] + robot_pose_wall_relative = [y_wall, x_wall] + rectif_angle = np.pi - angle + x_est = lambda x, y: y / 1000 + y_est = lambda x, y: x / 1000 - self.sim_offset + theta_est = lambda t: (t - np.pi) if t > 0 else (t + np.pi) + case = 4 + + inv_angle_condition = True + inv_lat_condition = True if case in [1, 2] else False + + elif in_rectangle(top_blue_area, self.parent.robot_pose): + x_wall, y_wall = ( + pose_considered.position.x * 1000, + 2000 - pose_considered.position.y * 1000, + ) + if angle > np.pi / 4 and angle < 3 * np.pi / 4: + d1, d2, d3 = values[31], values[32], values[33] + robot_pose_wall_relative = [x_wall, y_wall] + rectif_angle = np.pi / 2 - angle + x_est = lambda x, y: x / 1000 + y_est = lambda x, y: (2000 - y) / 1000 + theta_est = lambda t: t + np.pi / 2 + case = 1 + elif angle < -np.pi / 4 and angle > -3 * np.pi / 4: + d1, d2, d3 = values[34], values[35], values[30] + robot_pose_wall_relative = [x_wall, y_wall] + rectif_angle = -np.pi / 2 - angle + x_est = lambda x, y: x / 1000 + y_est = lambda x, y: (2000 - y) / 1000 + theta_est = lambda t: t - np.pi / 2 + case = 2 + elif angle > -np.pi / 4 and angle < np.pi / 4: + d1, d2, d3 = values[34], values[35], values[33] + robot_pose_wall_relative = [y_wall, x_wall] + rectif_angle = 0 - angle + x_est = lambda x, y: y / 1000 + y_est = lambda x, y: (2000 - x) / 1000 + theta_est = lambda t: t + case = 3 + else: + d1, d2, d3 = values[31], values[32], values[30] + robot_pose_wall_relative = [y_wall, x_wall] + rectif_angle = np.pi - angle + x_est = lambda x, y: y / 1000 + y_est = lambda x, y: (2000 - x) / 1000 + theta_est = lambda t: (t - np.pi) if t > 0 else (t + np.pi) + case = 4 + + inv_angle_condition = False + inv_lat_condition = False + + elif in_rectangle(bottom_yellow_area, self.parent.robot_pose): + x_wall, y_wall = ( + 3000 - (pose_considered.position.x - self.sim_offset) * 1000, + (pose_considered.position.y - self.sim_offset) * 
1000, + ) + if angle > np.pi / 4 and angle < 3 * np.pi / 4: + d1, d2, d3 = values[34], values[35], values[30] + robot_pose_wall_relative = [x_wall, y_wall] + rectif_angle = np.pi / 2 - angle + x_est = lambda x, y: (3000 - x) / 1000 + self.sim_offset + y_est = lambda x, y: y / 1000 - self.sim_offset + theta_est = lambda t: t + np.pi / 2 + case = 1 + elif angle < -np.pi / 4 and angle > -3 * np.pi / 4: + d1, d2, d3 = values[31], values[32], values[33] + robot_pose_wall_relative = [x_wall, y_wall] + rectif_angle = -np.pi / 2 - angle + x_est = lambda x, y: (3000 - x) / 1000 + self.sim_offset + y_est = lambda x, y: y / 1000 - self.sim_offset + theta_est = lambda t: t - np.pi / 2 + case = 2 + elif angle > -np.pi / 4 and angle < np.pi / 4: + d1, d2, d3 = values[31], values[32], values[30] + robot_pose_wall_relative = [y_wall, x_wall] + rectif_angle = 0 - angle + x_est = lambda x, y: (3000 - y) / 1000 + self.sim_offset + y_est = lambda x, y: x / 1000 - self.sim_offset + theta_est = lambda t: t + case = 3 + else: + d1, d2, d3 = values[34], values[35], values[33] + robot_pose_wall_relative = [y_wall, x_wall] + rectif_angle = np.pi - angle + x_est = lambda x, y: (3000 - y) / 1000 + self.sim_offset + y_est = lambda x, y: x / 1000 - self.sim_offset + theta_est = lambda t: (t - np.pi) if t > 0 else (t + np.pi) + case = 4 + + inv_angle_condition = False + inv_lat_condition = True + + elif in_rectangle(top_yellow_area, self.parent.robot_pose): + x_wall, y_wall = ( + 3000 - (pose_considered.position.x - self.sim_offset) * 1000, + 2000 - pose_considered.position.y * 1000, + ) + if angle > np.pi / 4 and angle < 3 * np.pi / 4: + d1, d2, d3 = values[31], values[32], values[30] + robot_pose_wall_relative = [x_wall, y_wall] + rectif_angle = np.pi / 2 - angle + x_est = lambda x, y: (3000 - x) / 1000 + self.sim_offset + y_est = lambda x, y: (2000 - y) / 1000 + theta_est = lambda t: t + np.pi / 2 + case = 1 + elif angle < -np.pi / 4 and angle > -3 * np.pi / 4: + d1, d2, d3 = values[34], values[35], values[33] + robot_pose_wall_relative = [x_wall, y_wall] + rectif_angle = -np.pi / 2 - angle + x_est = lambda x, y: (3000 - x) / 1000 + self.sim_offset + y_est = lambda x, y: (2000 - y) / 1000 + theta_est = lambda t: t - np.pi / 2 + case = 2 + elif angle > -np.pi / 4 and angle < np.pi / 4: + d1, d2, d3 = values[31], values[32], values[33] + robot_pose_wall_relative = [y_wall, x_wall] + rectif_angle = 0 - angle + x_est = lambda x, y: (3000 - y) / 1000 + self.sim_offset + y_est = lambda x, y: (2000 - x) / 1000 + theta_est = lambda t: t + case = 3 + else: + d1, d2, d3 = values[34], values[35], values[30] + robot_pose_wall_relative = [y_wall, x_wall] + rectif_angle = np.pi - angle + x_est = lambda x, y: (3000 - y) / 1000 + self.sim_offset + y_est = lambda x, y: (2000 - x) / 1000 + theta_est = lambda t: (t - np.pi) if t > 0 else (t + np.pi) + case = 4 + + inv_angle_condition = True + inv_lat_condition = True if case in [3, 4] else False + + else: + return None + + return { + "d": [d1, d2, d3], + "wall_relative": [ + robot_pose_wall_relative[0], + robot_pose_wall_relative[1], + rectif_angle, + ], + "pose_est": [x_est, y_est, theta_est], + "case": case, + "vlx_0x30": True if d3 == values[30] else False, + "inv_angle": inv_angle_condition, + "inv_lat": inv_lat_condition, + } + + def get_pose_from_vlx(self, d1, d2, d3, vlx_0x30): + """ Computing a pose relative to the wall considering vlx measured distances """ + theta = np.arctan((d2 - d1) / (2 * self.vlx_face_y)) + if vlx_0x30: + x = (d3 + self.vlx_lat_y) * np.cos(theta) - 
self.vlx_lat_x * np.sin(theta) + else: + x = (d3 + self.vlx_lat_y) * np.cos(theta) + self.vlx_lat_x * np.sin(theta) + y = ((d1 + d2) / 2 + self.vlx_face_x) * np.cos(theta) + return (x, y, theta) + + def get_vlx_from_pose(self, robot_pose_wall_relative, theta, vlx_0x30): + """ Computing an estimation of vlx distances from a given pose and orientation """ + d1 = ( + (robot_pose_wall_relative[1]) / np.cos(theta) + + self.vlx_face_y * np.tan(theta) + - self.vlx_face_x + ) + d2 = ( + (robot_pose_wall_relative[1]) / np.cos(theta) + - self.vlx_face_y * np.tan(theta) + - self.vlx_face_x + ) + if vlx_0x30: + d3 = ( + robot_pose_wall_relative[0] / np.cos(theta) + - self.vlx_lat_x * np.tan(theta) + - self.vlx_lat_y + ) + else: + d3 = ( + robot_pose_wall_relative[0] / np.cos(theta) + + self.vlx_lat_x * np.tan(theta) + - self.vlx_lat_y + ) + return (d1, d2, d3) + + def est_proj_wall( + self, + d1_est, + d2_est, + d3_est, + rectif_angle, + robot_pose_wall_relative, + case, + inv_angle=False, + inv_lat=False, + ): + """Computing an estimation of the projection of vlx wall hit from a given + position relative to the wall and an estimation of the vlx distance""" + if case in [1, 2]: + considered_angle = rectif_angle + considered_vlx_face_y = self.vlx_face_y + else: + considered_angle = -rectif_angle + considered_vlx_face_y = -self.vlx_face_y + + if case in [2, 3]: + considered_vlx_lat_x = -self.vlx_lat_x + else: + considered_vlx_lat_x = self.vlx_lat_x + if inv_angle: + considered_angle = -considered_angle + if inv_lat: + considered_vlx_lat_x = -considered_vlx_lat_x + + d1_est_proj_wall = ( + robot_pose_wall_relative[0] + + (d1_est + self.vlx_face_x) * np.sin(considered_angle) + + considered_vlx_face_y * np.cos(considered_angle) + ) + d2_est_proj_wall = ( + robot_pose_wall_relative[0] + + (d2_est + self.vlx_face_x) * np.sin(considered_angle) + - considered_vlx_face_y * np.cos(considered_angle) + ) + d3_est_proj_wall = ( + robot_pose_wall_relative[1] + - (d3_est + self.vlx_lat_y) * np.sin(considered_angle) + - considered_vlx_lat_x * np.cos(considered_angle) + ) + return (d1_est_proj_wall, d2_est_proj_wall, d3_est_proj_wall) + + +class VlxStamped: + """ VlxStamped class, an instance contains stamp + vlx values """ + + def __init__(self, values, stamp): + """ Init VlxStamped """ + self.values = copy.deepcopy(values) + self.stamp = stamp diff --git a/src/modules/localisation/package.xml b/src/modules/localisation/package.xml index fa32760c..2c4062da 100644 --- a/src/modules/localisation/package.xml +++ b/src/modules/localisation/package.xml @@ -4,8 +4,8 @@ localisation 0.8.3 Robot localisation module - Ewen BRUN - ECAM Makers :: CDFR 2020 + Philéas LAMBERT + ECAM Makers :: CDFR 2021 rclpy std_msgs diff --git a/src/modules/localisation/schematic_pictures/vlx_math_schematic.png b/src/modules/localisation/schematic_pictures/vlx_math_schematic.png new file mode 100755 index 00000000..d2639e1e Binary files /dev/null and b/src/modules/localisation/schematic_pictures/vlx_math_schematic.png differ diff --git a/src/modules/localisation/schematic_pictures/vlx_proj.png b/src/modules/localisation/schematic_pictures/vlx_proj.png new file mode 100755 index 00000000..d9fe76eb Binary files /dev/null and b/src/modules/localisation/schematic_pictures/vlx_proj.png differ diff --git a/src/modules/localisation/schematic_pictures/vlx_x_y_determination.png b/src/modules/localisation/schematic_pictures/vlx_x_y_determination.png new file mode 100755 index 00000000..8482d270 Binary files /dev/null and 
b/src/modules/localisation/schematic_pictures/vlx_x_y_determination.png differ diff --git a/src/modules/localisation/setup.py b/src/modules/localisation/setup.py index 010cb8f2..48c97aeb 100644 --- a/src/modules/localisation/setup.py +++ b/src/modules/localisation/setup.py @@ -13,7 +13,7 @@ setup( name=package_name, - version="0.8.3", + version="0.8.4", packages=find_packages(), data_files=[ (path.join("share", package_name), ["package.xml"]), @@ -23,11 +23,11 @@ install_requires=["setuptools"], author="Ewen BRUN", author_email="ewen.brun@ecam.fr", - maintainer="Ewen BRUN", - maintainer_email="ewen.brun@ecam.fr", - keywords=["ROS2", "", "CDFR"], - description="Code node cetautomatix.", - license="ECAM Makers :: CDFR 2020", + maintainer="Philéas LAMBERT", + maintainer_email="phileas.lambert@ecam.fr", + keywords=["ROS2", "localisation", "CDFR"], + description="Code node localisation.", + license="ECAM Makers :: CDFR 2021", entry_points={ "console_scripts": [ "localisation = localisation.localisation_node:main", diff --git a/src/navigation/teb_obstacles/teb_obstacles/teb_obstacles.py b/src/navigation/teb_obstacles/teb_obstacles/teb_obstacles.py index c617c544..2f73410e 100644 --- a/src/navigation/teb_obstacles/teb_obstacles/teb_obstacles.py +++ b/src/navigation/teb_obstacles/teb_obstacles/teb_obstacles.py @@ -11,9 +11,8 @@ from costmap_converter_msgs.msg import ObstacleArrayMsg, ObstacleMsg from visualization_msgs.msg import MarkerArray, Marker from nav_msgs.msg import Odometry -from geometry_msgs.msg import Point32 +from geometry_msgs.msg import Point32, PoseStamped from platform import machine -from transformix_msgs.srv import TransformixParametersTransformStamped class Teb_obstacles(Node): @@ -25,14 +24,15 @@ def __init__(self): "obelix" if self.get_namespace().strip("/") == "asterix" else "asterix" ) - self.get_ally_odom_transformation() - self.allies_subscription_ = self.create_subscription( - Odometry, f"/{self.ally}/odom", self.allies_subscription_callback, 10 + PoseStamped, + f"/{self.ally}/odom_map_relative", + self.allies_subscription_callback, + 10, ) self.enemies_subscription_ = self.create_subscription( MarkerArray, - "/enemies_positions_markers", + "/ennemies_positions_markers", self.enemies_subscription_callback, 10, ) @@ -51,29 +51,6 @@ def __init__(self): self.get_logger().info("teb_dynamic_obstacles node is ready") - def get_ally_odom_transformation(self): - if self.simulation: - return - - get_tf_client = self.create_client( - TransformixParametersTransformStamped, f"/{self.ally}/get_odom_map_tf" - ) - - if not get_tf_client.wait_for_service(timeout_sec=15.0): - self.get_logger().info( - f"No service /{self.ally}/get_odom_map_tf availible, is there ony one robot?" 
- ) - return - get_tf_request = TransformixParametersTransformStamped.Request() - future = get_tf_client.call_async(get_tf_request) - rclpy.spin_until_future_complete(self, future) - try: - response = future.result() - except Exception as e: - self.get_logger().info("Service call failed %r" % (e,)) - else: - self.odom_map_tf = response.transform_stamped - def initObstaclesArray(self): """ObstacleArray index 0: ally, index 1-2: enemies""" self.obstacles = ObstacleArrayMsg() @@ -130,9 +107,8 @@ def allies_subscription_callback(self, msg): ) > 0.3 ): - pose = msg.pose.pose - x = pose.position.x + self.odom_map_tf.transform.translation.x - y = pose.position.y + self.odom_map_tf.transform.translation.y + x = msg.pose.position.x + y = msg.pose.position.y tmp_marker = Marker() tmp_marker.pose.position.x = x tmp_marker.pose.position.y = y diff --git a/src/obelix/param/obelix.in.yml b/src/obelix/param/obelix.in.yml index 7798b9d1..0ffbe37a 100644 --- a/src/obelix/param/obelix.in.yml +++ b/src/obelix/param/obelix.in.yml @@ -1,5 +1,6 @@ !Defaults footprint: "[[-0.090, -0.175], [-0.090, 0.175], [0.090, 0.175], [0.090, -0.175]]" +use_sim_time: False max_steps_frequency: 10000 speedramp_resolution: 128 microsteps: 16 @@ -7,17 +8,20 @@ steps_per_turn: 400 wheels_separation: 0.282 wheels_radius: 0.061 min_turning_radius: 0.0 -max_vel_x: 0.4 -max_vel_x_backwards: 0.4 -max_vel_theta: 1.0 +max_vel_x_nominal: 2.0 +max_vel_x_accurate: 0.15 +max_vel_theta: 3.1 +max_vel_theta_accurate: 0.5 acc_lim_x: 0.5 -acc_lim_theta: 0.5 +acc_lim_theta: 2.5 robotbase_length: 0.18 robotbase_width: 0.315 --- obelix: drive_node: ros__parameters: + use_sim_time: !Var use_sim_time + wheels: separation: !Var wheels_separation radius: !Var wheels_radius @@ -45,28 +49,53 @@ obelix: strategy_mode: NORMAL + localisation_node: + ros__parameters: + vlx_lat_x: 80.0 + vlx_lat_y: 152.5 + vlx_face_x: 70.0 + vlx_face_y: 130.0 + + lcd_driver: ros__parameters: + use_sim_time: !Var use_sim_time + i2c_bus: 6 i2c_addr: 0x27 banner: "Obelix v0.8.3" + tree: + ros__parameters: + use_sim_time: !Var use_sim_time + controller_server: ros__parameters: - DynamicFollowPath: + NominalController: footprint_model.vertices: !Var footprint # Robot min_turning_radius: !Var min_turning_radius - max_vel_x: !Var max_vel_x - max_vel_x_backwards: !Var max_vel_x_backwards + max_vel_x: !Var max_vel_x_nominal + max_vel_x_backwards: !Var max_vel_x_nominal max_vel_theta: !Var max_vel_theta acc_lim_x: !Var acc_lim_x acc_lim_theta: !Var acc_lim_theta + AccurateController: + footprint_model.vertices: !Var footprint + + # Robot + min_turning_radius: !Var min_turning_radius + max_vel_x: !Var max_vel_x_accurate + max_vel_x_backwards: !Var max_vel_x_accurate + max_vel_theta: !Var max_vel_theta_accurate + acc_lim_x: !Var acc_lim_x + acc_lim_theta: !Var acc_lim_theta + local_costmap: local_costmap: diff --git a/tools/simulation/protos/Asterix.proto b/tools/simulation/protos/Asterix.proto index 9edf73b8..747a3b82 100644 --- a/tools/simulation/protos/Asterix.proto +++ b/tools/simulation/protos/Asterix.proto @@ -14,6 +14,7 @@ PROTO Asterix [ field SFBool supervisor TRUE # Is `Robot.supervisor`. field SFBool synchronization FALSE # Is `Robot.synchronization`. field MFNode arucoTag [ Aruco { } ] # Is `Robot.arucoTag`. + field MFNode vlx [ VlxManager { name "asterix_vlx_manager" } ] # Is `Robot.vlx`. 
] { Robot { @@ -138,35 +139,9 @@ PROTO Asterix [ translation 0 0 0.17 children IS arucoTag } - VL53L1X { - name "vlx_0x30" - translation 0.08 -0.13 -0.14 - rotation 0 0 1 -1.5708 - } - VL53L1X { - name "vlx_0x31" - translation 0.1 -0.11 -0.14 - rotation 0 0 1 0 - } - VL53L1X { - name "vlx_0x32" - translation 0.1 0.11 -0.14 - rotation 0 0 1 0 - } - VL53L1X { - name "vlx_0x33" - translation 0.08 0.13 -0.14 - rotation 0 0 1 1.5708 - } - VL53L1X { - name "vlx_0x34" - translation -0.1 0.11 -0.14 - rotation 0 0 1 -3.14159 - } - VL53L1X { - name "vlx_0x35" - translation -0.1 -0.11 -0.14 - rotation 0 0 1 -3.14159 + Transform { + translation 0 0 0.0 + children IS vlx } ] name "robotbase" diff --git a/tools/simulation/protos/VlxManager.proto b/tools/simulation/protos/VlxManager.proto new file mode 100644 index 00000000..e8ca897e --- /dev/null +++ b/tools/simulation/protos/VlxManager.proto @@ -0,0 +1,56 @@ +#VRML_SIM R2020b utf8 +PROTO VlxManager [ + field SFVec3f vlx0_tr 0.08 -0.1305 -0.14 + field SFRotation vlx0_rot 0 0 1 -1.5708 + field SFVec3f vlx1_tr 0.1005 -0.11 -0.14 + field SFRotation vlx1_rot 0 0 1 0 + field SFVec3f vlx2_tr 0.1005 0.11 -0.14 + field SFRotation vlx2_rot 0 0 1 0 + field SFVec3f vlx3_tr 0.08 0.1305 -0.14 + field SFRotation vlx3_rot 0 0 1 1.5708 + field SFVec3f vlx4_tr -0.1005 0.11 -0.14 + field SFRotation vlx4_rot 0 0 1 -3.14159 + field SFVec3f vlx5_tr -0.1005 -0.11 -0.14 + field SFRotation vlx5_rot 0 0 1 -3.14159 + field SFString name "vlx_manager" +] +{ + Robot { + name IS name + controller "" + supervisor TRUE + synchronization FALSE + children [ + VL53L1X { + name "vlx_0x30" + translation IS vlx0_tr + rotation IS vlx0_rot + } + VL53L1X { + name "vlx_0x31" + translation IS vlx1_tr + rotation IS vlx1_rot + } + VL53L1X { + name "vlx_0x32" + translation IS vlx2_tr + rotation IS vlx2_rot + } + VL53L1X { + name "vlx_0x33" + translation IS vlx3_tr + rotation IS vlx3_rot + } + VL53L1X { + name "vlx_0x34" + translation IS vlx4_tr + rotation IS vlx4_rot + } + VL53L1X { + name "vlx_0x35" + translation IS vlx5_tr + rotation IS vlx5_rot + } + ] + } +}