From 3d00899aed717a8e92b792a3895ef3c06700def7 Mon Sep 17 00:00:00 2001 From: GPrathap Date: Wed, 31 Jan 2024 09:42:35 +0000 Subject: [PATCH 1/5] adding robot control task --- uol_turtlebot_control/package.xml | 29 +++ .../resource/uol_turtlebot_control | 0 uol_turtlebot_control/setup.cfg | 4 + uol_turtlebot_control/setup.py | 26 ++ .../uol_turtlebot_control/__init__.py | 0 .../robot_control_strategy.py | 226 ++++++++++++++++++ 6 files changed, 285 insertions(+) create mode 100644 uol_turtlebot_control/package.xml create mode 100644 uol_turtlebot_control/resource/uol_turtlebot_control create mode 100644 uol_turtlebot_control/setup.cfg create mode 100644 uol_turtlebot_control/setup.py create mode 100644 uol_turtlebot_control/uol_turtlebot_control/__init__.py create mode 100644 uol_turtlebot_control/uol_turtlebot_control/robot_control_strategy.py diff --git a/uol_turtlebot_control/package.xml b/uol_turtlebot_control/package.xml new file mode 100644 index 0000000..7de2db3 --- /dev/null +++ b/uol_turtlebot_control/package.xml @@ -0,0 +1,29 @@ + + + + uol_turtlebot_control + 0.0.0 + Basic Diff drive robot control + op + TODO: License declaration + + geometry_msgs + rclcpp + sensor_msgs + + geometry_msgs + rclcpp + sensor_msgs + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + rclpy + std_msgs + + + ament_python + + diff --git a/uol_turtlebot_control/resource/uol_turtlebot_control b/uol_turtlebot_control/resource/uol_turtlebot_control new file mode 100644 index 0000000..e69de29 diff --git a/uol_turtlebot_control/setup.cfg b/uol_turtlebot_control/setup.cfg new file mode 100644 index 0000000..bcb64a6 --- /dev/null +++ b/uol_turtlebot_control/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/uol_turtlebot_control +[install] +install_scripts=$base/lib/uol_turtlebot_control diff --git a/uol_turtlebot_control/setup.py b/uol_turtlebot_control/setup.py new file mode 100644 index 0000000..c121c3f --- /dev/null +++ b/uol_turtlebot_control/setup.py @@ -0,0 +1,26 @@ +from setuptools import setup + +package_name = 'uol_turtlebot_control' + +setup( + name=package_name, + version='0.0.0', + packages=[package_name], + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='Geesara', + maintainer_email='ggeesara@gmail.com', + description='Basic Diff drive robot control', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'control_strategy = uol_turtlebot_control.robot_control_strategy:main', + ], + }, +) diff --git a/uol_turtlebot_control/uol_turtlebot_control/__init__.py b/uol_turtlebot_control/uol_turtlebot_control/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/uol_turtlebot_control/uol_turtlebot_control/robot_control_strategy.py b/uol_turtlebot_control/uol_turtlebot_control/robot_control_strategy.py new file mode 100644 index 0000000..162a559 --- /dev/null +++ b/uol_turtlebot_control/uol_turtlebot_control/robot_control_strategy.py @@ -0,0 +1,226 @@ +import rclpy +from rclpy.node import Node +from std_msgs.msg import String +from sensor_msgs.msg import LaserScan +from geometry_msgs.msg import Twist +from nav_msgs.msg import Odometry +import time +import cv2 +import numpy as np +from sensor_msgs.msg import Image, CompressedImage, CameraInfo +import std_msgs.msg as std_msg +import rclpy.qos as qos + + +class ControlStrategy(Node): + def __init__(self, 
delta_t, ): + super().__init__('control_strategy') + self.control_pub = self.create_publisher(Twist, '/cmd_vel', 30) + self.control_sub = self.create_subscription(Twist, '/cmd_vel', self.listener_callback, 10) + self.odom_sub = self.create_subscription(Odometry, "/odom", self.set_pose, 20) + + self.i = 0 + self.set_robot_init_pose = None + self.robot_current_pose = None + self.robot_current_control = [] + self.r = 0.3 # Wheel radius + self.L = 1.25 # Axle length + self.D = 0.07 # Distance between the front front wheel and rear axle + self.Ts = delta_t # Sampling time + self.t = np.arange(0, 10, self.Ts) # Simulation time + self.end_controller = False + self.timer = self.create_timer(self.Ts, self.timer_callback) + + def euler_from_quaternion(self, quaternion): + """ + Converts quaternion (w in last place) to euler roll, pitch, yaw + quaternion = [x, y, z, w] + """ + x = quaternion.x + y = quaternion.y + z = quaternion.z + w = quaternion.w + + sinr_cosp = 2 * (w * x + y * z) + cosr_cosp = 1 - 2 * (x * x + y * y) + roll = np.arctan2(sinr_cosp, cosr_cosp) + + sinp = 2 * (w * y - z * x) + pitch = np.arcsin(sinp) + + siny_cosp = 2 * (w * z + x * y) + cosy_cosp = 1 - 2 * (y * y + z * z) + yaw = np.arctan2(siny_cosp, cosy_cosp) + + return roll, pitch, yaw + + def listener_callback(self, msg): + self.robot_current_control = [msg.linear.x, msg.angular.z] + + def stop_vehicle(self, ): + self.send_vel(0.0, 0.0) + + def wrap_to_pi(self, x): + x = np.array([x]) + xwrap = np.remainder(x, 2*np.pi) + mask = np.abs(xwrap)>np.pi + xwrap[mask] -= 2*np.pi * np.sign(xwrap[mask]) + return xwrap[0] + + def diff_drive_init(self, left_wheel_v, right_wheel_v, duration=5): + self.duration = duration + self.wL = left_wheel_v # Left wheel velocity + self.wR = right_wheel_v # Right wheel velocity + self.time_utilized = 0.0 + + def inter_point_diff_drive_init(self, duration=10, r_distance=1.3 + , refPose=np.array([3,6,0]), k_p=0.5, k_w=0.7, dmin=0.7): + + self.duration = duration + self.r_distance = r_distance + self.refPose = refPose + self.k_p = k_p + self.k_w = k_w + self.dmin = dmin + self.time_utilized = 0.0 + self.xT = self.refPose[0] - self.r_distance*np.cos(self.refPose[2]) + self.yT = self.refPose[1] - self.r_distance*np.sin(self.refPose[2]) + self.state = 0 + + def inter_direction_diff_drive_init(self, duration=10, r_distance=1.3 + , refPose=np.array([3,6,0]), k_p=0.5, k_w=0.7, dmin=0.7): + self.duration = duration + self.r_distance = r_distance + self.refPose = refPose + self.k_p = k_p + self.k_w = k_w + self.dmin = dmin + self.time_utilized = 0.0 + + def inter_direction_diff_drive(self, ): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.get_logger().info(f'End of simulation', once=True) + self.stop_vehicle() + self.end_controller = True + return + + self.D = np.sqrt((self.robot_current_pose[0]-self.refPose[0])**2 + + (self.robot_current_pose[1]-self.refPose[1])**2) + + if(self.D < self.dmin): + self.get_logger().info(f'Reach to the goal pose', once=True) + self.stop_vehicle() + self.end_controller = True + return + + beta = np.arctan(self.r_distance/self.D) + phiR = np.arctan2(self.refPose[1]-self.robot_current_pose[1], self.refPose[0]-self.robot_current_pose[0]) + alpha = self.wrap_to_pi(phiR-self.refPose[2]) + + if(alpha <0): + beta = -beta + ##Controller + if(np.abs(alpha) < np.abs(beta)): + ePhi = self.wrap_to_pi(phiR - self.robot_current_pose[2] + alpha) + else: + ePhi = self.wrap_to_pi(phiR - self.robot_current_pose[2] + beta) + + v = self.k_p*self.D + w = 
self.k_w*ePhi + print("Distance to the goal: ", self.D) + dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2) + , v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def inter_point_diff_drive(self, ): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.stop_vehicle() + self.get_logger().info(f'End of simulation', once=True) + self.end_controller = True + return + + self.D = np.sqrt((self.robot_current_pose[0]-self.refPose[0])**2 + + (self.robot_current_pose[1]-self.refPose[1])**2) + + if(self.D < self.dmin): + self.stop_vehicle() + self.get_logger().info(f'Reach to the goal pose', once=True) + self.end_controller = True + return + + if self.state == 0: + d = np.sqrt((self.yT-self.robot_current_pose[1])**2 + + (self.xT-self.robot_current_pose[0])**2) + if(d < self.dmin): + self.state = 1 + self.phiT = np.arctan2(self.yT-self.robot_current_pose[1], self.xT-self.robot_current_pose[0]) + self.ePhi = self.phiT - self.robot_current_pose[2] + else: + self.ePhi = self.refPose[2] - self.robot_current_pose[2] + + v = self.k_p*self.D + w = self.k_w*self.ePhi + print("Distance to the goal: ", self.D) + dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def perform_action_diff_drive_one_step(self): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.stop_vehicle() + self.get_logger().info(f'End of simulation', once=True) + self.end_controller = True + return + v = self.r/2*(self.wR+self.wL) # Robot velocity + w = self.r/self.L*(self.wR-self.wL) # Robot angular velocity + dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def set_pose(self, msg): + _, _, yaw = self.euler_from_quaternion(msg.pose.pose.orientation) + if(self.set_robot_init_pose is None): + self.set_robot_init_pose = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]) + self.robot_current_pose = self.set_robot_init_pose + + def send_vel(self, v, w): + msg = Twist() + msg.linear.x = v + msg.angular.z = w + self.control_pub.publish(msg) + + def timer_callback(self, ): + self.perform_action_diff_drive_one_step() + # self.inter_direction_diff_drive() + # self.inter_point_diff_drive() + return + +def main(args=None): + rclpy.init(args=args) + control_strategy = ControlStrategy(delta_t=0.03) + control_strategy.diff_drive_init(0.2, 0.2, duration=20) + # control_strategy.inter_direction_diff_drive_init() + # control_strategy.inter_point_diff_drive_init() + + while rclpy.ok(): + try: + rclpy.spin_once(control_strategy) + except KeyboardInterrupt: + break + control_strategy.destroy_node() + rclpy.shutdown() + +if 
__name__ == '__main__': + main() + +# To reset ros2 service call /reset_simulation std_srvs/srv/Empty From 3ed993479f380d4d1e4000deeed997f481b00767 Mon Sep 17 00:00:00 2001 From: GPrathap Date: Sat, 3 Feb 2024 11:52:27 +0000 Subject: [PATCH 2/5] changing package name --- uol_turtlebot_control/package.xml | 29 --- .../resource/uol_turtlebot_control | 0 uol_turtlebot_control/setup.cfg | 4 - uol_turtlebot_control/setup.py | 26 -- .../uol_turtlebot_control/__init__.py | 0 .../robot_control_strategy.py | 226 ------------------ 6 files changed, 285 deletions(-) delete mode 100644 uol_turtlebot_control/package.xml delete mode 100644 uol_turtlebot_control/resource/uol_turtlebot_control delete mode 100644 uol_turtlebot_control/setup.cfg delete mode 100644 uol_turtlebot_control/setup.py delete mode 100644 uol_turtlebot_control/uol_turtlebot_control/__init__.py delete mode 100644 uol_turtlebot_control/uol_turtlebot_control/robot_control_strategy.py diff --git a/uol_turtlebot_control/package.xml b/uol_turtlebot_control/package.xml deleted file mode 100644 index 7de2db3..0000000 --- a/uol_turtlebot_control/package.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - uol_turtlebot_control - 0.0.0 - Basic Diff drive robot control - op - TODO: License declaration - - geometry_msgs - rclcpp - sensor_msgs - - geometry_msgs - rclcpp - sensor_msgs - - ament_copyright - ament_flake8 - ament_pep257 - python3-pytest - - rclpy - std_msgs - - - ament_python - - diff --git a/uol_turtlebot_control/resource/uol_turtlebot_control b/uol_turtlebot_control/resource/uol_turtlebot_control deleted file mode 100644 index e69de29..0000000 diff --git a/uol_turtlebot_control/setup.cfg b/uol_turtlebot_control/setup.cfg deleted file mode 100644 index bcb64a6..0000000 --- a/uol_turtlebot_control/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/uol_turtlebot_control -[install] -install_scripts=$base/lib/uol_turtlebot_control diff --git a/uol_turtlebot_control/setup.py b/uol_turtlebot_control/setup.py deleted file mode 100644 index c121c3f..0000000 --- a/uol_turtlebot_control/setup.py +++ /dev/null @@ -1,26 +0,0 @@ -from setuptools import setup - -package_name = 'uol_turtlebot_control' - -setup( - name=package_name, - version='0.0.0', - packages=[package_name], - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools'], - zip_safe=True, - maintainer='Geesara', - maintainer_email='ggeesara@gmail.com', - description='Basic Diff drive robot control', - license='TODO: License declaration', - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - 'control_strategy = uol_turtlebot_control.robot_control_strategy:main', - ], - }, -) diff --git a/uol_turtlebot_control/uol_turtlebot_control/__init__.py b/uol_turtlebot_control/uol_turtlebot_control/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/uol_turtlebot_control/uol_turtlebot_control/robot_control_strategy.py b/uol_turtlebot_control/uol_turtlebot_control/robot_control_strategy.py deleted file mode 100644 index 162a559..0000000 --- a/uol_turtlebot_control/uol_turtlebot_control/robot_control_strategy.py +++ /dev/null @@ -1,226 +0,0 @@ -import rclpy -from rclpy.node import Node -from std_msgs.msg import String -from sensor_msgs.msg import LaserScan -from geometry_msgs.msg import Twist -from nav_msgs.msg import Odometry -import time -import cv2 -import numpy as np -from sensor_msgs.msg import Image, CompressedImage, 
CameraInfo -import std_msgs.msg as std_msg -import rclpy.qos as qos - - -class ControlStrategy(Node): - def __init__(self, delta_t, ): - super().__init__('control_strategy') - self.control_pub = self.create_publisher(Twist, '/cmd_vel', 30) - self.control_sub = self.create_subscription(Twist, '/cmd_vel', self.listener_callback, 10) - self.odom_sub = self.create_subscription(Odometry, "/odom", self.set_pose, 20) - - self.i = 0 - self.set_robot_init_pose = None - self.robot_current_pose = None - self.robot_current_control = [] - self.r = 0.3 # Wheel radius - self.L = 1.25 # Axle length - self.D = 0.07 # Distance between the front front wheel and rear axle - self.Ts = delta_t # Sampling time - self.t = np.arange(0, 10, self.Ts) # Simulation time - self.end_controller = False - self.timer = self.create_timer(self.Ts, self.timer_callback) - - def euler_from_quaternion(self, quaternion): - """ - Converts quaternion (w in last place) to euler roll, pitch, yaw - quaternion = [x, y, z, w] - """ - x = quaternion.x - y = quaternion.y - z = quaternion.z - w = quaternion.w - - sinr_cosp = 2 * (w * x + y * z) - cosr_cosp = 1 - 2 * (x * x + y * y) - roll = np.arctan2(sinr_cosp, cosr_cosp) - - sinp = 2 * (w * y - z * x) - pitch = np.arcsin(sinp) - - siny_cosp = 2 * (w * z + x * y) - cosy_cosp = 1 - 2 * (y * y + z * z) - yaw = np.arctan2(siny_cosp, cosy_cosp) - - return roll, pitch, yaw - - def listener_callback(self, msg): - self.robot_current_control = [msg.linear.x, msg.angular.z] - - def stop_vehicle(self, ): - self.send_vel(0.0, 0.0) - - def wrap_to_pi(self, x): - x = np.array([x]) - xwrap = np.remainder(x, 2*np.pi) - mask = np.abs(xwrap)>np.pi - xwrap[mask] -= 2*np.pi * np.sign(xwrap[mask]) - return xwrap[0] - - def diff_drive_init(self, left_wheel_v, right_wheel_v, duration=5): - self.duration = duration - self.wL = left_wheel_v # Left wheel velocity - self.wR = right_wheel_v # Right wheel velocity - self.time_utilized = 0.0 - - def inter_point_diff_drive_init(self, duration=10, r_distance=1.3 - , refPose=np.array([3,6,0]), k_p=0.5, k_w=0.7, dmin=0.7): - - self.duration = duration - self.r_distance = r_distance - self.refPose = refPose - self.k_p = k_p - self.k_w = k_w - self.dmin = dmin - self.time_utilized = 0.0 - self.xT = self.refPose[0] - self.r_distance*np.cos(self.refPose[2]) - self.yT = self.refPose[1] - self.r_distance*np.sin(self.refPose[2]) - self.state = 0 - - def inter_direction_diff_drive_init(self, duration=10, r_distance=1.3 - , refPose=np.array([3,6,0]), k_p=0.5, k_w=0.7, dmin=0.7): - self.duration = duration - self.r_distance = r_distance - self.refPose = refPose - self.k_p = k_p - self.k_w = k_w - self.dmin = dmin - self.time_utilized = 0.0 - - def inter_direction_diff_drive(self, ): - if(self.robot_current_pose is not None): - if(self.duration < self.time_utilized): - self.get_logger().info(f'End of simulation', once=True) - self.stop_vehicle() - self.end_controller = True - return - - self.D = np.sqrt((self.robot_current_pose[0]-self.refPose[0])**2 - + (self.robot_current_pose[1]-self.refPose[1])**2) - - if(self.D < self.dmin): - self.get_logger().info(f'Reach to the goal pose', once=True) - self.stop_vehicle() - self.end_controller = True - return - - beta = np.arctan(self.r_distance/self.D) - phiR = np.arctan2(self.refPose[1]-self.robot_current_pose[1], self.refPose[0]-self.robot_current_pose[0]) - alpha = self.wrap_to_pi(phiR-self.refPose[2]) - - if(alpha <0): - beta = -beta - ##Controller - if(np.abs(alpha) < np.abs(beta)): - ePhi = self.wrap_to_pi(phiR - 
self.robot_current_pose[2] + alpha) - else: - ePhi = self.wrap_to_pi(phiR - self.robot_current_pose[2] + beta) - - v = self.k_p*self.D - w = self.k_w*ePhi - print("Distance to the goal: ", self.D) - dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2) - , v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) - self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration - self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] - self.send_vel(v, w) - self.time_utilized = self.time_utilized + self.Ts - - def inter_point_diff_drive(self, ): - if(self.robot_current_pose is not None): - if(self.duration < self.time_utilized): - self.stop_vehicle() - self.get_logger().info(f'End of simulation', once=True) - self.end_controller = True - return - - self.D = np.sqrt((self.robot_current_pose[0]-self.refPose[0])**2 - + (self.robot_current_pose[1]-self.refPose[1])**2) - - if(self.D < self.dmin): - self.stop_vehicle() - self.get_logger().info(f'Reach to the goal pose', once=True) - self.end_controller = True - return - - if self.state == 0: - d = np.sqrt((self.yT-self.robot_current_pose[1])**2 - + (self.xT-self.robot_current_pose[0])**2) - if(d < self.dmin): - self.state = 1 - self.phiT = np.arctan2(self.yT-self.robot_current_pose[1], self.xT-self.robot_current_pose[0]) - self.ePhi = self.phiT - self.robot_current_pose[2] - else: - self.ePhi = self.refPose[2] - self.robot_current_pose[2] - - v = self.k_p*self.D - w = self.k_w*self.ePhi - print("Distance to the goal: ", self.D) - dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) - self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration - self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] - self.send_vel(v, w) - self.time_utilized = self.time_utilized + self.Ts - - def perform_action_diff_drive_one_step(self): - if(self.robot_current_pose is not None): - if(self.duration < self.time_utilized): - self.stop_vehicle() - self.get_logger().info(f'End of simulation', once=True) - self.end_controller = True - return - v = self.r/2*(self.wR+self.wL) # Robot velocity - w = self.r/self.L*(self.wR-self.wL) # Robot angular velocity - dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) - self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration - self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] - self.send_vel(v, w) - self.time_utilized = self.time_utilized + self.Ts - - def set_pose(self, msg): - _, _, yaw = self.euler_from_quaternion(msg.pose.pose.orientation) - if(self.set_robot_init_pose is None): - self.set_robot_init_pose = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]) - self.robot_current_pose = self.set_robot_init_pose - - def send_vel(self, v, w): - msg = Twist() - msg.linear.x = v - msg.angular.z = w - self.control_pub.publish(msg) - - def timer_callback(self, ): - self.perform_action_diff_drive_one_step() - # self.inter_direction_diff_drive() - # self.inter_point_diff_drive() - return - -def main(args=None): - rclpy.init(args=args) - control_strategy = ControlStrategy(delta_t=0.03) - control_strategy.diff_drive_init(0.2, 0.2, duration=20) - # control_strategy.inter_direction_diff_drive_init() - # control_strategy.inter_point_diff_drive_init() - - while rclpy.ok(): 
- try: - rclpy.spin_once(control_strategy) - except KeyboardInterrupt: - break - control_strategy.destroy_node() - rclpy.shutdown() - -if __name__ == '__main__': - main() - -# To reset ros2 service call /reset_simulation std_srvs/srv/Empty From 589fca57928ea9f433479b655d779df30c502d08 Mon Sep 17 00:00:00 2001 From: GPrathap Date: Thu, 15 Feb 2024 08:06:53 +0000 Subject: [PATCH 3/5] adding task for week 4 --- control_strategy/control_strategy/__init__.py | 0 .../robot_control_strategy.py | 226 ++++++++++++++++++ .../robot_control_strategy_todo.py | 126 ++++++++++ control_strategy/package.xml | 29 +++ control_strategy/resource/control_strategy | 0 control_strategy/setup.cfg | 4 + control_strategy/setup.py | 26 ++ 7 files changed, 411 insertions(+) create mode 100644 control_strategy/control_strategy/__init__.py create mode 100644 control_strategy/control_strategy/robot_control_strategy.py create mode 100644 control_strategy/control_strategy/robot_control_strategy_todo.py create mode 100644 control_strategy/package.xml create mode 100644 control_strategy/resource/control_strategy create mode 100644 control_strategy/setup.cfg create mode 100644 control_strategy/setup.py diff --git a/control_strategy/control_strategy/__init__.py b/control_strategy/control_strategy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/control_strategy/control_strategy/robot_control_strategy.py b/control_strategy/control_strategy/robot_control_strategy.py new file mode 100644 index 0000000..162a559 --- /dev/null +++ b/control_strategy/control_strategy/robot_control_strategy.py @@ -0,0 +1,226 @@ +import rclpy +from rclpy.node import Node +from std_msgs.msg import String +from sensor_msgs.msg import LaserScan +from geometry_msgs.msg import Twist +from nav_msgs.msg import Odometry +import time +import cv2 +import numpy as np +from sensor_msgs.msg import Image, CompressedImage, CameraInfo +import std_msgs.msg as std_msg +import rclpy.qos as qos + + +class ControlStrategy(Node): + def __init__(self, delta_t, ): + super().__init__('control_strategy') + self.control_pub = self.create_publisher(Twist, '/cmd_vel', 30) + self.control_sub = self.create_subscription(Twist, '/cmd_vel', self.listener_callback, 10) + self.odom_sub = self.create_subscription(Odometry, "/odom", self.set_pose, 20) + + self.i = 0 + self.set_robot_init_pose = None + self.robot_current_pose = None + self.robot_current_control = [] + self.r = 0.3 # Wheel radius + self.L = 1.25 # Axle length + self.D = 0.07 # Distance between the front front wheel and rear axle + self.Ts = delta_t # Sampling time + self.t = np.arange(0, 10, self.Ts) # Simulation time + self.end_controller = False + self.timer = self.create_timer(self.Ts, self.timer_callback) + + def euler_from_quaternion(self, quaternion): + """ + Converts quaternion (w in last place) to euler roll, pitch, yaw + quaternion = [x, y, z, w] + """ + x = quaternion.x + y = quaternion.y + z = quaternion.z + w = quaternion.w + + sinr_cosp = 2 * (w * x + y * z) + cosr_cosp = 1 - 2 * (x * x + y * y) + roll = np.arctan2(sinr_cosp, cosr_cosp) + + sinp = 2 * (w * y - z * x) + pitch = np.arcsin(sinp) + + siny_cosp = 2 * (w * z + x * y) + cosy_cosp = 1 - 2 * (y * y + z * z) + yaw = np.arctan2(siny_cosp, cosy_cosp) + + return roll, pitch, yaw + + def listener_callback(self, msg): + self.robot_current_control = [msg.linear.x, msg.angular.z] + + def stop_vehicle(self, ): + self.send_vel(0.0, 0.0) + + def wrap_to_pi(self, x): + x = np.array([x]) + xwrap = np.remainder(x, 2*np.pi) + mask = 
np.abs(xwrap)>np.pi + xwrap[mask] -= 2*np.pi * np.sign(xwrap[mask]) + return xwrap[0] + + def diff_drive_init(self, left_wheel_v, right_wheel_v, duration=5): + self.duration = duration + self.wL = left_wheel_v # Left wheel velocity + self.wR = right_wheel_v # Right wheel velocity + self.time_utilized = 0.0 + + def inter_point_diff_drive_init(self, duration=10, r_distance=1.3 + , refPose=np.array([3,6,0]), k_p=0.5, k_w=0.7, dmin=0.7): + + self.duration = duration + self.r_distance = r_distance + self.refPose = refPose + self.k_p = k_p + self.k_w = k_w + self.dmin = dmin + self.time_utilized = 0.0 + self.xT = self.refPose[0] - self.r_distance*np.cos(self.refPose[2]) + self.yT = self.refPose[1] - self.r_distance*np.sin(self.refPose[2]) + self.state = 0 + + def inter_direction_diff_drive_init(self, duration=10, r_distance=1.3 + , refPose=np.array([3,6,0]), k_p=0.5, k_w=0.7, dmin=0.7): + self.duration = duration + self.r_distance = r_distance + self.refPose = refPose + self.k_p = k_p + self.k_w = k_w + self.dmin = dmin + self.time_utilized = 0.0 + + def inter_direction_diff_drive(self, ): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.get_logger().info(f'End of simulation', once=True) + self.stop_vehicle() + self.end_controller = True + return + + self.D = np.sqrt((self.robot_current_pose[0]-self.refPose[0])**2 + + (self.robot_current_pose[1]-self.refPose[1])**2) + + if(self.D < self.dmin): + self.get_logger().info(f'Reach to the goal pose', once=True) + self.stop_vehicle() + self.end_controller = True + return + + beta = np.arctan(self.r_distance/self.D) + phiR = np.arctan2(self.refPose[1]-self.robot_current_pose[1], self.refPose[0]-self.robot_current_pose[0]) + alpha = self.wrap_to_pi(phiR-self.refPose[2]) + + if(alpha <0): + beta = -beta + ##Controller + if(np.abs(alpha) < np.abs(beta)): + ePhi = self.wrap_to_pi(phiR - self.robot_current_pose[2] + alpha) + else: + ePhi = self.wrap_to_pi(phiR - self.robot_current_pose[2] + beta) + + v = self.k_p*self.D + w = self.k_w*ePhi + print("Distance to the goal: ", self.D) + dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2) + , v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def inter_point_diff_drive(self, ): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.stop_vehicle() + self.get_logger().info(f'End of simulation', once=True) + self.end_controller = True + return + + self.D = np.sqrt((self.robot_current_pose[0]-self.refPose[0])**2 + + (self.robot_current_pose[1]-self.refPose[1])**2) + + if(self.D < self.dmin): + self.stop_vehicle() + self.get_logger().info(f'Reach to the goal pose', once=True) + self.end_controller = True + return + + if self.state == 0: + d = np.sqrt((self.yT-self.robot_current_pose[1])**2 + + (self.xT-self.robot_current_pose[0])**2) + if(d < self.dmin): + self.state = 1 + self.phiT = np.arctan2(self.yT-self.robot_current_pose[1], self.xT-self.robot_current_pose[0]) + self.ePhi = self.phiT - self.robot_current_pose[2] + else: + self.ePhi = self.refPose[2] - self.robot_current_pose[2] + + v = self.k_p*self.D + w = self.k_w*self.ePhi + print("Distance to the goal: ", self.D) + dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), 
v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def perform_action_diff_drive_one_step(self): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.stop_vehicle() + self.get_logger().info(f'End of simulation', once=True) + self.end_controller = True + return + v = self.r/2*(self.wR+self.wL) # Robot velocity + w = self.r/self.L*(self.wR-self.wL) # Robot angular velocity + dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def set_pose(self, msg): + _, _, yaw = self.euler_from_quaternion(msg.pose.pose.orientation) + if(self.set_robot_init_pose is None): + self.set_robot_init_pose = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]) + self.robot_current_pose = self.set_robot_init_pose + + def send_vel(self, v, w): + msg = Twist() + msg.linear.x = v + msg.angular.z = w + self.control_pub.publish(msg) + + def timer_callback(self, ): + self.perform_action_diff_drive_one_step() + # self.inter_direction_diff_drive() + # self.inter_point_diff_drive() + return + +def main(args=None): + rclpy.init(args=args) + control_strategy = ControlStrategy(delta_t=0.03) + control_strategy.diff_drive_init(0.2, 0.2, duration=20) + # control_strategy.inter_direction_diff_drive_init() + # control_strategy.inter_point_diff_drive_init() + + while rclpy.ok(): + try: + rclpy.spin_once(control_strategy) + except KeyboardInterrupt: + break + control_strategy.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() + +# To reset ros2 service call /reset_simulation std_srvs/srv/Empty diff --git a/control_strategy/control_strategy/robot_control_strategy_todo.py b/control_strategy/control_strategy/robot_control_strategy_todo.py new file mode 100644 index 0000000..2c1926b --- /dev/null +++ b/control_strategy/control_strategy/robot_control_strategy_todo.py @@ -0,0 +1,126 @@ +import rclpy +from rclpy.node import Node +from std_msgs.msg import String +from geometry_msgs.msg import Twist +from nav_msgs.msg import Odometry +import numpy as np + +import std_msgs.msg as std_msg +import rclpy.qos as qos + + +class ControlStrategy(Node): + def __init__(self, delta_t, ): + super().__init__('control_strategy') + self.control_pub = self.create_publisher(Twist, '/cmd_vel', 30) + self.control_sub = self.create_subscription(Twist, '/cmd_vel', self.listener_callback, 10) + self.odom_sub = self.create_subscription(Odometry, "/odom", self.set_pose, 20) + + self.i = 0 + self.set_robot_init_pose = None + self.robot_current_pose = None + self.robot_current_control = [] + self.r = 0.3 # Wheel radius + self.L = 1.25 # Axle length + self.D = 0.07 # Distance between the front front wheel and rear axle + self.Ts = delta_t # Sampling time + self.t = np.arange(0, 10, self.Ts) # Simulation time + self.end_controller = False + self.timer = self.create_timer(self.Ts, self.timer_callback) + + def euler_from_quaternion(self, quaternion): + """ + Converts quaternion (w in last place) to 
euler roll, pitch, yaw + quaternion = [x, y, z, w] + """ + x = quaternion.x + y = quaternion.y + z = quaternion.z + w = quaternion.w + + sinr_cosp = 2 * (w * x + y * z) + cosr_cosp = 1 - 2 * (x * x + y * y) + roll = np.arctan2(sinr_cosp, cosr_cosp) + + sinp = 2 * (w * y - z * x) + pitch = np.arcsin(sinp) + + siny_cosp = 2 * (w * z + x * y) + cosy_cosp = 1 - 2 * (y * y + z * z) + yaw = np.arctan2(siny_cosp, cosy_cosp) + + return roll, pitch, yaw + + def listener_callback(self, msg): + self.robot_current_control = [msg.linear.x, msg.angular.z] + + def stop_vehicle(self, ): + self.send_vel(0.0, 0.0) + + def wrap_to_pi(self, x): + x = np.array([x]) + xwrap = np.remainder(x, 2*np.pi) + mask = np.abs(xwrap)>np.pi + xwrap[mask] -= 2*np.pi * np.sign(xwrap[mask]) + return xwrap[0] + + def diff_drive_init(self, left_wheel_v, right_wheel_v, duration=5): + self.duration = duration + self.wL = left_wheel_v # Left wheel velocity + self.wR = right_wheel_v # Right wheel velocity + self.time_utilized = 0.0 + + + def perform_action_diff_drive_one_step(self): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.stop_vehicle() + self.get_logger().info(f'End of simulation', once=True) + self.end_controller = True + return + + #TODO Estimate the desired robot linear and angular velocity + # v = + # w = + #TODO Estimate robot dynamics, the robot current pose (x_{t}) can be obtained by self.robot_current_pose + # dq = + #TODO Update the self.robot_current_pose, i.e., x_{t+1} = x_{t} + self.Ts*dq + #TODO Normalize the robot orientation [-pi, pi], you may use the self.wrap_to_pi(self.robot_current_pose[2]) + #TODO Send the control commands to via self.send_vel(v, w) + + self.time_utilized = self.time_utilized + self.Ts + + def set_pose(self, msg): + # Setting the current pose of the robot + _, _, yaw = self.euler_from_quaternion(msg.pose.pose.orientation) + if(self.set_robot_init_pose is None): + self.set_robot_init_pose = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]) + self.robot_current_pose = self.set_robot_init_pose + + def send_vel(self, v, w): + # TODO Control the robot's movement by specifying its linear and angular velocities + # on the /cmd_vel (self.control_pub) topic + pass + + def timer_callback(self, ): + # Use this timer callback to send the commands to robot + self.perform_action_diff_drive_one_step() + return + +def main(args=None): + rclpy.init(args=args) + control_strategy = ControlStrategy(delta_t=0.03) + # TODO Utilize the control_strategy.diff_drive_init function to set the angular velocities + # of the left and right side wheels of the robot + while rclpy.ok(): + try: + rclpy.spin_once(control_strategy) + except KeyboardInterrupt: + break + control_strategy.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() + +# To reset ros2 service call /reset_simulation std_srvs/srv/Empty diff --git a/control_strategy/package.xml b/control_strategy/package.xml new file mode 100644 index 0000000..a1402ed --- /dev/null +++ b/control_strategy/package.xml @@ -0,0 +1,29 @@ + + + + control_strategy + 0.0.0 + Basic Diff drive robot control + op + TODO: License declaration + + geometry_msgs + rclcpp + sensor_msgs + + geometry_msgs + rclcpp + sensor_msgs + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + rclpy + std_msgs + + + ament_python + + diff --git a/control_strategy/resource/control_strategy b/control_strategy/resource/control_strategy new file mode 100644 index 0000000..e69de29 diff --git 
a/control_strategy/setup.cfg b/control_strategy/setup.cfg new file mode 100644 index 0000000..7869238 --- /dev/null +++ b/control_strategy/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/control_strategy +[install] +install_scripts=$base/lib/control_strategy diff --git a/control_strategy/setup.py b/control_strategy/setup.py new file mode 100644 index 0000000..f2e0fae --- /dev/null +++ b/control_strategy/setup.py @@ -0,0 +1,26 @@ +from setuptools import setup + +package_name = 'control_strategy' + +setup( + name=package_name, + version='0.0.0', + packages=[package_name], + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='Geesara', + maintainer_email='ggeesara@gmail.com', + description='Basic Diff drive robot control', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'control_strategy = control_strategy.robot_control_strategy:main', + ], + }, +) From a929a5a35ded03f832efe7820743b5dedf5150a8 Mon Sep 17 00:00:00 2001 From: GPrathap Date: Sun, 25 Feb 2024 12:03:01 +0000 Subject: [PATCH 4/5] adding feedback control tasks --- .../robot_feedback_control_todo.py | 214 ++++++++++++++++++ 1 file changed, 214 insertions(+) create mode 100644 cmp3103m_ros2_code_fragments/cmp3103m_ros2_code_fragments/robot_feedback_control_todo.py diff --git a/cmp3103m_ros2_code_fragments/cmp3103m_ros2_code_fragments/robot_feedback_control_todo.py b/cmp3103m_ros2_code_fragments/cmp3103m_ros2_code_fragments/robot_feedback_control_todo.py new file mode 100644 index 0000000..549d9a5 --- /dev/null +++ b/cmp3103m_ros2_code_fragments/cmp3103m_ros2_code_fragments/robot_feedback_control_todo.py @@ -0,0 +1,214 @@ +import rclpy +from rclpy.node import Node +from std_msgs.msg import String +from sensor_msgs.msg import LaserScan +from geometry_msgs.msg import Twist +from nav_msgs.msg import Odometry +import time +import cv2 +import numpy as np +from sensor_msgs.msg import Image, CompressedImage, CameraInfo +import std_msgs.msg as std_msg +import rclpy.qos as qos + + +class ControlStrategy(Node): + def __init__(self, delta_t, ): + super().__init__('control_strategy') + self.control_pub = self.create_publisher(Twist, '/cmd_vel', 30) + self.control_sub = self.create_subscription(Twist, '/cmd_vel', self.listener_callback, 10) + self.odom_sub = self.create_subscription(Odometry, "/odom", self.set_pose, 20) + + self.i = 0 + self.set_robot_init_pose = None + self.robot_current_pose = None + self.robot_current_control = [] + self.r = 0.3 # Wheel radius + self.L = 1.25 # Axle length + self.D = 0.07 # Distance between the front front wheel and rear axle + self.Ts = delta_t # Sampling time + self.t = np.arange(0, 10, self.Ts) # Simulation time + self.end_controller = False + self.timer = self.create_timer(self.Ts, self.timer_callback) + + + def euler_from_quaternion(self, quaternion): + """ + Converts quaternion (w in last place) to euler roll, pitch, yaw + quaternion = [x, y, z, w] + """ + x = quaternion.x + y = quaternion.y + z = quaternion.z + w = quaternion.w + + sinr_cosp = 2 * (w * x + y * z) + cosr_cosp = 1 - 2 * (x * x + y * y) + roll = np.arctan2(sinr_cosp, cosr_cosp) + + sinp = 2 * (w * y - z * x) + pitch = np.arcsin(sinp) + + siny_cosp = 2 * (w * z + x * y) + cosy_cosp = 1 - 2 * (y * y + z * z) + yaw = np.arctan2(siny_cosp, cosy_cosp) + + return roll, pitch, yaw + + def listener_callback(self, msg): + 
self.robot_current_control = [msg.linear.x, msg.angular.z] + + def stop_vehicle(self, ): + self.send_vel(0.0, 0.0) + + def wrap_to_pi(self, x): + x = np.array([x]) + xwrap = np.remainder(x, 2*np.pi) + mask = np.abs(xwrap)>np.pi + xwrap[mask] -= 2*np.pi * np.sign(xwrap[mask]) + return xwrap[0] + + def diff_drive_init(self, left_wheel_v, right_wheel_v, duration=5): + self.duration = duration + self.wL = left_wheel_v # Left wheel velocity + self.wR = right_wheel_v # Right wheel velocity + self.time_utilized = 0.0 + + def inter_point_diff_drive_init(self, duration=10, r_distance=0.5 + , refPose=np.array([1,1,0]), k_p=0.5, k_w=0.7, dmin=0.7): + + self.duration = duration + self.r_distance = r_distance + self.refPose = refPose + self.k_p = k_p + self.k_w = k_w + self.dmin = dmin + self.time_utilized = 0.0 + self.xT = self.refPose[0] - self.r_distance*np.cos(self.refPose[2]) + self.yT = self.refPose[1] - self.r_distance*np.sin(self.refPose[2]) + self.state = 0 + + def inter_direction_diff_drive_init(self, duration=10, r_distance=1.3 + , refPose=np.array([1.5,1.5,np.pi/2]), k_p=0.5, k_w=0.7, dmin=0.7): + self.duration = duration + self.r_distance = r_distance + self.refPose = refPose + self.k_p = k_p + self.k_w = k_w + self.dmin = dmin + self.time_utilized = 0.0 + + def inter_direction_diff_drive(self, ): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.get_logger().info(f'End of simulation', once=True) + self.stop_vehicle() + self.end_controller = True + return + + # TODO Estimate the distance (self.D) between current pose (self.robot_current_pose) # + # and reference pose (self.refPose) + # self.D = + + if(self.D < self.dmin): + self.get_logger().info(f'Reach to the goal pose', once=True) + self.stop_vehicle() + self.end_controller = True + return + + # TODO Estimate orientation residual (self.ePhi) + + v = self.k_p*self.D + w = self.k_w*self.ePhi + print("Distance to the goal: ", self.D) + dq = np.array([v*np.cos(self.robot_current_pose_real[2]+self.Ts*w/2) + , v*np.sin(self.robot_current_pose_real[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def inter_point_diff_drive(self, ): + if(self.robot_current_pose is not None): + if(self.duration < self.time_utilized): + self.stop_vehicle() + self.get_logger().info(f'End of simulation', once=True) + self.end_controller = True + return + + # TODO Estimate the distance (self.D) between current pose (self.robot_current_pose) # + # and reference pose (self.refPose) + # self.D = + + if(self.D < self.dmin): + self.stop_vehicle() + self.get_logger().info(f'Reach to the goal pose', once=True) + self.end_controller = True + return + + # TODO Estimate orientation residual (self.ePhi) + + v = self.k_p*self.D + w = self.k_w*self.ePhi + print("Distance to the goal: ", self.D) + dq = np.array([v*np.cos(self.robot_current_pose_real[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose_real[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def perform_action_diff_drive_one_step(self): + if(self.robot_current_pose is not None): + if(self.duration < 
self.time_utilized): + self.stop_vehicle() + self.get_logger().info(f'End of simulation', once=True) + self.end_controller = True + return + v = self.r/2*(self.wR+self.wL) # Robot velocity + w = self.r/self.L*(self.wR-self.wL) # Robot angular velocity + dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) + self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration + self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] + self.send_vel(v, w) + self.time_utilized = self.time_utilized + self.Ts + + def set_pose(self, msg): + _, _, yaw = self.euler_from_quaternion(msg.pose.pose.orientation) + if(self.set_robot_init_pose is None): + self.set_robot_init_pose = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]) + self.robot_current_pose = self.set_robot_init_pose + self.robot_current_pose_real = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]) + + def send_vel(self, v, w): + msg = Twist() + msg.linear.x = v + msg.angular.z = w + self.control_pub.publish(msg) + + def timer_callback(self, ): + # self.perform_action_diff_drive_one_step() + self.inter_point_diff_drive() + # self.inter_direction_diff_drive() + return + +def main(args=None): + rclpy.init(args=args) + control_strategy = ControlStrategy(delta_t=0.03) + # control_strategy.diff_drive_init(0.2, 0.2, duration=20) + # control_strategy.inter_direction_diff_drive_init(r_distance=0.6 + # , refPose=np.array([1.0,1.0,0]), k_p=0.1, k_w=0.5, dmin=0.7) + control_strategy.inter_point_diff_drive_init(r_distance=0.5 + , refPose=np.array([1,1,0]), k_p=0.1, k_w=0.5, dmin=0.7) + + while rclpy.ok(): + try: + rclpy.spin_once(control_strategy) + except KeyboardInterrupt: + break + control_strategy.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() + +# To reset ros2 service call /reset_simulation std_srvs/srv/Empty From 655424fbc9584905983fd9873b766fa3cda3e354 Mon Sep 17 00:00:00 2001 From: GPrathap Date: Sun, 25 Feb 2024 12:06:37 +0000 Subject: [PATCH 5/5] removing unused package --- control_strategy/control_strategy/__init__.py | 0 .../robot_control_strategy.py | 226 ------------------ .../robot_control_strategy_todo.py | 126 ---------- control_strategy/package.xml | 29 --- control_strategy/resource/control_strategy | 0 control_strategy/setup.cfg | 4 - control_strategy/setup.py | 26 -- 7 files changed, 411 deletions(-) delete mode 100644 control_strategy/control_strategy/__init__.py delete mode 100644 control_strategy/control_strategy/robot_control_strategy.py delete mode 100644 control_strategy/control_strategy/robot_control_strategy_todo.py delete mode 100644 control_strategy/package.xml delete mode 100644 control_strategy/resource/control_strategy delete mode 100644 control_strategy/setup.cfg delete mode 100644 control_strategy/setup.py diff --git a/control_strategy/control_strategy/__init__.py b/control_strategy/control_strategy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/control_strategy/control_strategy/robot_control_strategy.py b/control_strategy/control_strategy/robot_control_strategy.py deleted file mode 100644 index 162a559..0000000 --- a/control_strategy/control_strategy/robot_control_strategy.py +++ /dev/null @@ -1,226 +0,0 @@ -import rclpy -from rclpy.node import Node -from std_msgs.msg import String -from sensor_msgs.msg import LaserScan -from geometry_msgs.msg import Twist -from nav_msgs.msg import Odometry -import 
time -import cv2 -import numpy as np -from sensor_msgs.msg import Image, CompressedImage, CameraInfo -import std_msgs.msg as std_msg -import rclpy.qos as qos - - -class ControlStrategy(Node): - def __init__(self, delta_t, ): - super().__init__('control_strategy') - self.control_pub = self.create_publisher(Twist, '/cmd_vel', 30) - self.control_sub = self.create_subscription(Twist, '/cmd_vel', self.listener_callback, 10) - self.odom_sub = self.create_subscription(Odometry, "/odom", self.set_pose, 20) - - self.i = 0 - self.set_robot_init_pose = None - self.robot_current_pose = None - self.robot_current_control = [] - self.r = 0.3 # Wheel radius - self.L = 1.25 # Axle length - self.D = 0.07 # Distance between the front front wheel and rear axle - self.Ts = delta_t # Sampling time - self.t = np.arange(0, 10, self.Ts) # Simulation time - self.end_controller = False - self.timer = self.create_timer(self.Ts, self.timer_callback) - - def euler_from_quaternion(self, quaternion): - """ - Converts quaternion (w in last place) to euler roll, pitch, yaw - quaternion = [x, y, z, w] - """ - x = quaternion.x - y = quaternion.y - z = quaternion.z - w = quaternion.w - - sinr_cosp = 2 * (w * x + y * z) - cosr_cosp = 1 - 2 * (x * x + y * y) - roll = np.arctan2(sinr_cosp, cosr_cosp) - - sinp = 2 * (w * y - z * x) - pitch = np.arcsin(sinp) - - siny_cosp = 2 * (w * z + x * y) - cosy_cosp = 1 - 2 * (y * y + z * z) - yaw = np.arctan2(siny_cosp, cosy_cosp) - - return roll, pitch, yaw - - def listener_callback(self, msg): - self.robot_current_control = [msg.linear.x, msg.angular.z] - - def stop_vehicle(self, ): - self.send_vel(0.0, 0.0) - - def wrap_to_pi(self, x): - x = np.array([x]) - xwrap = np.remainder(x, 2*np.pi) - mask = np.abs(xwrap)>np.pi - xwrap[mask] -= 2*np.pi * np.sign(xwrap[mask]) - return xwrap[0] - - def diff_drive_init(self, left_wheel_v, right_wheel_v, duration=5): - self.duration = duration - self.wL = left_wheel_v # Left wheel velocity - self.wR = right_wheel_v # Right wheel velocity - self.time_utilized = 0.0 - - def inter_point_diff_drive_init(self, duration=10, r_distance=1.3 - , refPose=np.array([3,6,0]), k_p=0.5, k_w=0.7, dmin=0.7): - - self.duration = duration - self.r_distance = r_distance - self.refPose = refPose - self.k_p = k_p - self.k_w = k_w - self.dmin = dmin - self.time_utilized = 0.0 - self.xT = self.refPose[0] - self.r_distance*np.cos(self.refPose[2]) - self.yT = self.refPose[1] - self.r_distance*np.sin(self.refPose[2]) - self.state = 0 - - def inter_direction_diff_drive_init(self, duration=10, r_distance=1.3 - , refPose=np.array([3,6,0]), k_p=0.5, k_w=0.7, dmin=0.7): - self.duration = duration - self.r_distance = r_distance - self.refPose = refPose - self.k_p = k_p - self.k_w = k_w - self.dmin = dmin - self.time_utilized = 0.0 - - def inter_direction_diff_drive(self, ): - if(self.robot_current_pose is not None): - if(self.duration < self.time_utilized): - self.get_logger().info(f'End of simulation', once=True) - self.stop_vehicle() - self.end_controller = True - return - - self.D = np.sqrt((self.robot_current_pose[0]-self.refPose[0])**2 - + (self.robot_current_pose[1]-self.refPose[1])**2) - - if(self.D < self.dmin): - self.get_logger().info(f'Reach to the goal pose', once=True) - self.stop_vehicle() - self.end_controller = True - return - - beta = np.arctan(self.r_distance/self.D) - phiR = np.arctan2(self.refPose[1]-self.robot_current_pose[1], self.refPose[0]-self.robot_current_pose[0]) - alpha = self.wrap_to_pi(phiR-self.refPose[2]) - - if(alpha <0): - beta = -beta - 
##Controller - if(np.abs(alpha) < np.abs(beta)): - ePhi = self.wrap_to_pi(phiR - self.robot_current_pose[2] + alpha) - else: - ePhi = self.wrap_to_pi(phiR - self.robot_current_pose[2] + beta) - - v = self.k_p*self.D - w = self.k_w*ePhi - print("Distance to the goal: ", self.D) - dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2) - , v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) - self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration - self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] - self.send_vel(v, w) - self.time_utilized = self.time_utilized + self.Ts - - def inter_point_diff_drive(self, ): - if(self.robot_current_pose is not None): - if(self.duration < self.time_utilized): - self.stop_vehicle() - self.get_logger().info(f'End of simulation', once=True) - self.end_controller = True - return - - self.D = np.sqrt((self.robot_current_pose[0]-self.refPose[0])**2 - + (self.robot_current_pose[1]-self.refPose[1])**2) - - if(self.D < self.dmin): - self.stop_vehicle() - self.get_logger().info(f'Reach to the goal pose', once=True) - self.end_controller = True - return - - if self.state == 0: - d = np.sqrt((self.yT-self.robot_current_pose[1])**2 - + (self.xT-self.robot_current_pose[0])**2) - if(d < self.dmin): - self.state = 1 - self.phiT = np.arctan2(self.yT-self.robot_current_pose[1], self.xT-self.robot_current_pose[0]) - self.ePhi = self.phiT - self.robot_current_pose[2] - else: - self.ePhi = self.refPose[2] - self.robot_current_pose[2] - - v = self.k_p*self.D - w = self.k_w*self.ePhi - print("Distance to the goal: ", self.D) - dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) - self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration - self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] - self.send_vel(v, w) - self.time_utilized = self.time_utilized + self.Ts - - def perform_action_diff_drive_one_step(self): - if(self.robot_current_pose is not None): - if(self.duration < self.time_utilized): - self.stop_vehicle() - self.get_logger().info(f'End of simulation', once=True) - self.end_controller = True - return - v = self.r/2*(self.wR+self.wL) # Robot velocity - w = self.r/self.L*(self.wR-self.wL) # Robot angular velocity - dq = np.array([v*np.cos(self.robot_current_pose[2]+self.Ts*w/2), v*np.sin(self.robot_current_pose[2]+self.Ts*w/2), w]) - self.robot_current_pose = self.robot_current_pose + self.Ts*dq # Integration - self.robot_current_pose[2] = self.wrap_to_pi(self.robot_current_pose[2]) # Map orientation angle to [-pi, pi] - self.send_vel(v, w) - self.time_utilized = self.time_utilized + self.Ts - - def set_pose(self, msg): - _, _, yaw = self.euler_from_quaternion(msg.pose.pose.orientation) - if(self.set_robot_init_pose is None): - self.set_robot_init_pose = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]) - self.robot_current_pose = self.set_robot_init_pose - - def send_vel(self, v, w): - msg = Twist() - msg.linear.x = v - msg.angular.z = w - self.control_pub.publish(msg) - - def timer_callback(self, ): - self.perform_action_diff_drive_one_step() - # self.inter_direction_diff_drive() - # self.inter_point_diff_drive() - return - -def main(args=None): - rclpy.init(args=args) - control_strategy = ControlStrategy(delta_t=0.03) - control_strategy.diff_drive_init(0.2, 0.2, duration=20) - # 
control_strategy.inter_direction_diff_drive_init() - # control_strategy.inter_point_diff_drive_init() - - while rclpy.ok(): - try: - rclpy.spin_once(control_strategy) - except KeyboardInterrupt: - break - control_strategy.destroy_node() - rclpy.shutdown() - -if __name__ == '__main__': - main() - -# To reset ros2 service call /reset_simulation std_srvs/srv/Empty diff --git a/control_strategy/control_strategy/robot_control_strategy_todo.py b/control_strategy/control_strategy/robot_control_strategy_todo.py deleted file mode 100644 index 2c1926b..0000000 --- a/control_strategy/control_strategy/robot_control_strategy_todo.py +++ /dev/null @@ -1,126 +0,0 @@ -import rclpy -from rclpy.node import Node -from std_msgs.msg import String -from geometry_msgs.msg import Twist -from nav_msgs.msg import Odometry -import numpy as np - -import std_msgs.msg as std_msg -import rclpy.qos as qos - - -class ControlStrategy(Node): - def __init__(self, delta_t, ): - super().__init__('control_strategy') - self.control_pub = self.create_publisher(Twist, '/cmd_vel', 30) - self.control_sub = self.create_subscription(Twist, '/cmd_vel', self.listener_callback, 10) - self.odom_sub = self.create_subscription(Odometry, "/odom", self.set_pose, 20) - - self.i = 0 - self.set_robot_init_pose = None - self.robot_current_pose = None - self.robot_current_control = [] - self.r = 0.3 # Wheel radius - self.L = 1.25 # Axle length - self.D = 0.07 # Distance between the front front wheel and rear axle - self.Ts = delta_t # Sampling time - self.t = np.arange(0, 10, self.Ts) # Simulation time - self.end_controller = False - self.timer = self.create_timer(self.Ts, self.timer_callback) - - def euler_from_quaternion(self, quaternion): - """ - Converts quaternion (w in last place) to euler roll, pitch, yaw - quaternion = [x, y, z, w] - """ - x = quaternion.x - y = quaternion.y - z = quaternion.z - w = quaternion.w - - sinr_cosp = 2 * (w * x + y * z) - cosr_cosp = 1 - 2 * (x * x + y * y) - roll = np.arctan2(sinr_cosp, cosr_cosp) - - sinp = 2 * (w * y - z * x) - pitch = np.arcsin(sinp) - - siny_cosp = 2 * (w * z + x * y) - cosy_cosp = 1 - 2 * (y * y + z * z) - yaw = np.arctan2(siny_cosp, cosy_cosp) - - return roll, pitch, yaw - - def listener_callback(self, msg): - self.robot_current_control = [msg.linear.x, msg.angular.z] - - def stop_vehicle(self, ): - self.send_vel(0.0, 0.0) - - def wrap_to_pi(self, x): - x = np.array([x]) - xwrap = np.remainder(x, 2*np.pi) - mask = np.abs(xwrap)>np.pi - xwrap[mask] -= 2*np.pi * np.sign(xwrap[mask]) - return xwrap[0] - - def diff_drive_init(self, left_wheel_v, right_wheel_v, duration=5): - self.duration = duration - self.wL = left_wheel_v # Left wheel velocity - self.wR = right_wheel_v # Right wheel velocity - self.time_utilized = 0.0 - - - def perform_action_diff_drive_one_step(self): - if(self.robot_current_pose is not None): - if(self.duration < self.time_utilized): - self.stop_vehicle() - self.get_logger().info(f'End of simulation', once=True) - self.end_controller = True - return - - #TODO Estimate the desired robot linear and angular velocity - # v = - # w = - #TODO Estimate robot dynamics, the robot current pose (x_{t}) can be obtained by self.robot_current_pose - # dq = - #TODO Update the self.robot_current_pose, i.e., x_{t+1} = x_{t} + self.Ts*dq - #TODO Normalize the robot orientation [-pi, pi], you may use the self.wrap_to_pi(self.robot_current_pose[2]) - #TODO Send the control commands to via self.send_vel(v, w) - - self.time_utilized = self.time_utilized + self.Ts - - def set_pose(self, msg): - 
# Setting the current pose of the robot - _, _, yaw = self.euler_from_quaternion(msg.pose.pose.orientation) - if(self.set_robot_init_pose is None): - self.set_robot_init_pose = np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, yaw]) - self.robot_current_pose = self.set_robot_init_pose - - def send_vel(self, v, w): - # TODO Control the robot's movement by specifying its linear and angular velocities - # on the /cmd_vel (self.control_pub) topic - pass - - def timer_callback(self, ): - # Use this timer callback to send the commands to robot - self.perform_action_diff_drive_one_step() - return - -def main(args=None): - rclpy.init(args=args) - control_strategy = ControlStrategy(delta_t=0.03) - # TODO Utilize the control_strategy.diff_drive_init function to set the angular velocities - # of the left and right side wheels of the robot - while rclpy.ok(): - try: - rclpy.spin_once(control_strategy) - except KeyboardInterrupt: - break - control_strategy.destroy_node() - rclpy.shutdown() - -if __name__ == '__main__': - main() - -# To reset ros2 service call /reset_simulation std_srvs/srv/Empty diff --git a/control_strategy/package.xml b/control_strategy/package.xml deleted file mode 100644 index a1402ed..0000000 --- a/control_strategy/package.xml +++ /dev/null @@ -1,29 +0,0 @@ - - - - control_strategy - 0.0.0 - Basic Diff drive robot control - op - TODO: License declaration - - geometry_msgs - rclcpp - sensor_msgs - - geometry_msgs - rclcpp - sensor_msgs - - ament_copyright - ament_flake8 - ament_pep257 - python3-pytest - - rclpy - std_msgs - - - ament_python - - diff --git a/control_strategy/resource/control_strategy b/control_strategy/resource/control_strategy deleted file mode 100644 index e69de29..0000000 diff --git a/control_strategy/setup.cfg b/control_strategy/setup.cfg deleted file mode 100644 index 7869238..0000000 --- a/control_strategy/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/control_strategy -[install] -install_scripts=$base/lib/control_strategy diff --git a/control_strategy/setup.py b/control_strategy/setup.py deleted file mode 100644 index f2e0fae..0000000 --- a/control_strategy/setup.py +++ /dev/null @@ -1,26 +0,0 @@ -from setuptools import setup - -package_name = 'control_strategy' - -setup( - name=package_name, - version='0.0.0', - packages=[package_name], - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools'], - zip_safe=True, - maintainer='Geesara', - maintainer_email='ggeesara@gmail.com', - description='Basic Diff drive robot control', - license='TODO: License declaration', - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - 'control_strategy = control_strategy.robot_control_strategy:main', - ], - }, -)
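
The open-loop controller in these patches converts the commanded wheel speeds into body velocities, v = r/2*(wR + wL) and w = r/L*(wR - wL), and then integrates the unicycle model with a midpoint heading, q_{t+1} = q_t + Ts*[v*cos(theta + Ts*w/2), v*sin(theta + Ts*w/2), w]. The following is a minimal, ROS-free sketch of that same update so it can be run on its own; the constants (r = 0.3, L = 1.25, Ts = 0.03, wheel speeds of 0.2 and a 20 s duration) are taken from the code above, while the function names and the trailing print are illustrative assumptions rather than part of any package in the series.

import numpy as np

def wrap_to_pi(angle):
    # Map an angle to [-pi, pi], mirroring the node's wrap_to_pi helper.
    wrapped = np.remainder(angle, 2 * np.pi)
    if abs(wrapped) > np.pi:
        wrapped -= 2 * np.pi * np.sign(wrapped)
    return wrapped

def simulate_diff_drive(wL=0.2, wR=0.2, r=0.3, L=1.25, Ts=0.03, duration=20.0):
    # Integrate the unicycle model forward in time and return the pose trajectory.
    q = np.zeros(3)                      # pose [x, y, theta], starting at the origin
    poses = [q.copy()]
    v = r / 2.0 * (wR + wL)              # linear velocity from wheel speeds
    w = r / L * (wR - wL)                # angular velocity from wheel speeds
    t = 0.0
    while t < duration:
        dq = np.array([v * np.cos(q[2] + Ts * w / 2.0),
                       v * np.sin(q[2] + Ts * w / 2.0),
                       w])
        q = q + Ts * dq                  # Euler step with midpoint heading
        q[2] = wrap_to_pi(q[2])
        poses.append(q.copy())
        t += Ts
    return np.array(poses)

if __name__ == '__main__':
    trajectory = simulate_diff_drive()
    print("final pose [x, y, theta]:", trajectory[-1])

As a rough usage sketch for the ROS 2 nodes themselves, assuming a standard colcon workspace and that the package in question is still present at that point in the series: colcon build, source install/setup.bash, then ros2 run <package_name> control_strategy, where control_strategy is the console_scripts executable declared in the corresponding setup.py. The simulation can be reset with "ros2 service call /reset_simulation std_srvs/srv/Empty", as noted at the bottom of each script.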