-
Notifications
You must be signed in to change notification settings - Fork 3
/
lab2_example.py
executable file
·87 lines (66 loc) · 2.87 KB
/
lab2_example.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
import cv2
import numpy as np
from intro_to_robotics.image_converter import ToOpenCV, depthToOpenCV
#this function does our image processing
#returns the location and "size" of the detected object
def process_image(image):
    """Locate a red-ish object in a BGR camera frame.

    Args:
        image: BGR image (numpy array) from the camera driver.

    Returns:
        (location, magnitude): ``location`` is the object's centroid (x, y)
        relative to the image center (``None`` when nothing is detected);
        ``magnitude`` is the zeroth image moment of the color mask (0 when
        nothing is detected) and can serve as a rough size/distance proxy.
    """
    #convert color space from BGR to HSV
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    #create bounds for our color filter
    #NOTE(review): red hue wraps around in HSV — this range only covers
    #hues 0-10, so reds near 170-180 are missed; OR in a second mask for
    #the upper range if detection is unreliable.
    lower_bound = np.array([0, 10, 10])
    upper_bound = np.array([10, 255, 255])
    #execute the color filter, returns a binary black/white image
    mask = cv2.inRange(hsv_image, lower_bound, upper_bound)
    #display the results of the color filter
    cv2.imshow("image_mask", mask)
    #calculate the centroid of the results of the color filter
    M = cv2.moments(mask)
    location = None
    magnitude = 0
    if M['m00'] > 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        magnitude = M['m00']
        #generalized: derive the screen center from the actual frame size
        #instead of assuming a 640x480 camera (was hard-coded 320, 240;
        #identical result for 640x480 input)
        height, width = image.shape[:2]
        location = (cx - width // 2, cy - height // 2)  #scale so that 0,0 is center of screen
        #draw a circle on the image where we detected the centroid of the object
        cv2.circle(image, (cx, cy), 3, (0, 0, 255), -1)
    #display the original image with the centroid drawn on the image
    cv2.imshow("processing result", image)
    #waitKey() is necessary for making all the cv2.imshow() commands work
    cv2.waitKey(1)
    return location, magnitude
class Node:
    """ROS node that watches the camera feed and drives the turtlebot."""

    def __init__(self):
        #subscribe to the camera feed; every new frame invokes image_callback
        self.image_sub = rospy.Subscriber('/camera/rgb/image_raw', Image, self.image_callback, queue_size=1)
        #publisher used to send velocity commands to the turtlebot
        self.movement_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)

    def image_callback(self, ros_image):
        """Handle a single incoming camera frame.

        All of the node's logic runs here: vision processing first, then
        (once filled in) the turtlebot control decision and command publish.
        """
        #translate the ROS message into an array OpenCV can work with
        frame = np.asarray(ToOpenCV(ros_image))
        #vision step: yields the on-screen (x, y) of the detected object and
        #a "size" figure usable to estimate distance; None/0 when nothing
        #is seen
        location, magnitude = process_image(frame)
        #record what the vision step found
        rospy.logdebug("image location: {}\tmagnitude: {}".format(location, magnitude))
        ###########
        # Insert turtlebot controlling logic here!
        ###########
        command = Twist()
        #send the (currently empty/zero) command to the turtlebot
        self.movement_pub.publish(command)
if __name__ == "__main__":
    #register this process with the ROS master, wire up the node's
    #subscriber/publisher, then block in spin() until shutdown — every
    #piece of work happens inside the image callback
    rospy.init_node("lab2_example")
    node = Node()
    rospy.spin()