diff --git a/image_tools_py/README b/image_tools_py/README new file mode 100644 index 000000000..42230ae69 --- /dev/null +++ b/image_tools_py/README @@ -0,0 +1,109 @@ +This is a demonstration of the Quality of Service (QoS) features of ROS 2 using Python. +There are two programs implemented here: cam2image_py, and showimage_py. Note that in +order for these programs to work, an OpenCV binding for Python3 must be available. As +of this writing (January 11, 2017), only OpenCV 3 or later supports Python3. Instructions +for compiling OpenCV3 for Python3 are available here: + +http://stackoverflow.com/questions/20953273/install-opencv-for-python-3-3 + +The condensed rundown that works on Ubuntu16.04 and will install to /usr/local is: +$ sudo apt install python3 build-essential cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev python3.5-dev libpython3-dev python3-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev +$ git clone https://github.com/opencv/opencv +$ cd opencv +$ git checkout 3.2.0 +$ mkdir release +$ cd release +$ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local .. +$ make -j8 +$ sudo make install + +CAM2IMAGE_PY +------------ +This is a Python program that will take data from an attached camera, and publish the +data to a topic called "image", with the type sensor_msgs::msg::Image. If a camera +isn't available, this program can also generate a default image and smoothly "move" +it across the screen, simulating motion. The usage output from the program looks like +this: + +usage: cam2image_py.py [-h] [-b] [-d QUEUE_DEPTH] [-f FREQUENCY] [-k {0,1}] + [-r {0,1}] [-s {0,1}] [-x WIDTH] [-y HEIGHT] + +optional arguments: + -h, --help show this help message and exit + -b, --burger Produce images of burgers rather than connecting to a + camera (default: False) + -d QUEUE_DEPTH, --depth QUEUE_DEPTH + Queue depth (default: 10) + -f FREQUENCY, --frequency FREQUENCY + Publish frequency in Hz (default: 30) + -k {0,1}, --keep {0,1} + History QoS setting, 0 - keep last sample, 1 - keep + all the samples (default: 1) + -r {0,1}, --reliability {0,1} + Reliability QoS setting, 0 - best effort, 1 - reliable + (default: 1) + -s {0,1}, --show {0,1} + Show the camera stream (default: 0) + -x WIDTH, --width WIDTH + Image width (default: 320) + -y HEIGHT, --height HEIGHT + Image height (default: 240) + +The -d, -k, and -r parameters control various aspects of the QoS implementation, and +are the most interesting to play with when testing out QoS. + +Note that this program also subscribes to a topic called "flip_image" of type +std_msgs::msg::Bool. If flip_image is set to False, the data coming out of the camera +is sent as usual. If flip_image is set to True, the data coming out of the camera is +flipped around the Y axis. + +If the -s parameter is set to 1, then this program opens up a (local) window to show +the images that are being published. However, these images are *not* coming in through +the ROS 2 pub/sub model, so this window cannot show off the QoS parameters (it is mostly +useful for debugging). See SHOWIMAGE_PY below for a program that can show QoS over the +pub/sub model. + +SHOWIMAGE_PY +------------ +This is a Python program that subscribes to the "image" topic, waiting for data. As +new data comes in, this program accepts the data and can optionally render it to +the screen. 
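+The core of the program is a single subscription on the "image" topic plus a NumPy
+reshape of the raw message bytes. As a rough illustration only (condensed from the
+full showimage_py.py source further down; the node name here is made up), the idea is:
+
+  import numpy
+  import rclpy
+  from rclpy.qos import qos_profile_default
+  import sensor_msgs.msg
+
+  def image_cb(msg):
+      # Rebuild an OpenCV-compatible array from the raw bytes (assumes bgr8 data,
+      # which is what cam2image_py publishes).
+      frame = numpy.ndarray(
+          shape=(msg.height, msg.width, 3), dtype='uint8', buffer=bytes(msg.data))
+      print('Received image #%s (%dx%d)' % (
+          msg.header.frame_id, frame.shape[1], frame.shape[0]))
+
+  rclpy.init()
+  node = rclpy.create_node('showimage_sketch')
+  node.create_subscription(
+      sensor_msgs.msg.Image, 'image', image_cb, qos_profile=qos_profile_default)
+  while rclpy.ok():
+      rclpy.spin_once(node)
+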
+The usage output from the program looks like this:
+
+usage: showimage_py.py [-h] [-d QUEUE_DEPTH] [-k {0,1}] [-r {0,1}] [-s {0,1}]
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -d QUEUE_DEPTH, --depth QUEUE_DEPTH
+                        Queue depth (default: 10)
+  -k {0,1}, --keep {0,1}
+                        History QoS setting, 0 - keep last sample, 1 - keep
+                        all the samples (default: 1)
+  -r {0,1}, --reliability {0,1}
+                        Reliability QoS setting, 0 - best effort, 1 - reliable
+                        (default: 1)
+  -s {0,1}, --show {0,1}
+                        Show the camera stream (default: 1)
+
+The -d, -k, and -r parameters control various aspects of the QoS implementation, and
+are the most interesting to play with when testing out QoS.
+
+If the -s parameter is set to 1, then this program opens up a window to show the images
+that have been received over the ROS 2 pub/sub model. This program should be used
+in conjunction with CAM2IMAGE_PY to demonstrate the ROS 2 QoS capabilities over lossy/slow
+links.
+
+EXAMPLE USAGE
+-------------
+To use the above programs, run them as follows:
+
+# In the first terminal, run the data publisher. This will connect to the first camera
+# available, and print out "Publishing image #" for each image it publishes.
+$ python3 cam2image_py.py
+
+# In a second terminal, run the data subscriber. This will subscribe to the "image"
+# topic and render any frames it receives.
+$ python3 showimage_py.py -s 1
+
+# If you don't have a local camera, you can use the -b parameter to generate data on
+# the fly rather than get data from a camera:
+$ python3 cam2image_py.py -b
diff --git a/image_tools_py/burger_py.py b/image_tools_py/burger_py.py
new file mode 100644
index 000000000..dc29e985b
--- /dev/null
+++ b/image_tools_py/burger_py.py
@@ -0,0 +1,109 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# System imports
+import base64
+import random
+
+# OpenCV
+import cv2
+
+# Numpy
+import numpy
+
+# THE FOLLOWING IS A BURGER IN BASE64.
RESPECT IT + +BURGER_BASE64 = 'iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH4AUYFx8cR++mBgAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAABCPSURBVHja7Vt3UFTX235YtrC7FCkioKyKBWNBo6Aw1iEYx5IYQwxGY4xoLAQdRUej/qyDYxRiA0siYyZGoyFBUYwaRQWCJhAMBHtBBAxIF5Zl2WV3n++Pb7jjBmyA/vy+5J05A/ecc0957nnPW9cCAPEPJhH+4fQvAP8C8A8n8UudTCyGTCZ7bDtJmEwm6PV6mEym/x8AeHp6YsiQIRg4cCBUKhWsrKxA8qkA6HQ6PHz4EEVFRcjLy8Pt27dx5coVlJaWtvoa2dqlZ8+ejIiIYE5ODquqqlhXV8fmksFgoE6nY01NDSsrK5mTk8OdO3dy2LBhrbJWi9bQA0QiEezs7DB58mSEhYXBw8NDaDMYDNDr9dDr9dBqtcjJyUFubi6Ki4tRXV2Nuro6kIRMJoO1tTUcHR3h4uIClUoFV1dXSCQSiMVioUgkEmFsjUaDXbt2YefOnbh//z7q6+ufe+0tBqBLly6YMmUKFixYAHt7e6H+1q1buHnzJtLS0pCeno6srKwnHl8XFxc4OTlBp9Ph7t27MBqNAAAbGxu4uLigS5cu8PT0hLe3N3r27IlevXqZ3ScxMTHYs2cP0tPTXx4LhISE8MaNG2ZHNjY2lhMnTmS3bt0e+16/fv24cuVK9uvXT6jr06cPL168yKNHj3Lz5s1CvUwm4+LFi7lkyRL6+PgQAF1dXRkQEMAvvviCDx48EOYuKirimjVrnncfzeAbCwtGR0eb8fa+ffvYuXNnWllZmfUVi8VmdQ4ODkxPT+eWLVsYHx9v1rdt27YcMWIEnZ2dzeYaO3YsN2zYwLi4uEZjOzo6ct68eVSr1STJ+vp6fvnllxSJRC8GALFYzPDwcOGCSkpKooeHR6N+MpmMS5YsYXJyMjMzM6lSqQiAEomE7733HmfMmEG5XN7oPblczhMnTlCr1TI4OLhJ8Jtal0ql4rVr14QPMn369BcDwLRp00iSOp2On3322WP7BQcHMzIykqGhoZwxY0ajdnd3dw4ePJg+Pj60tLQU6hUKBVetWsVNmzZx8ODBQn2bNm04fPhwDh482OyEPFpsbGx46dIlkuTVq1dpbW3dugB07NhROPbLli0T6jt37sxVq1axe/fuQp1SqaSvry87dOjQaJyJEycyOTmZ586d47lz55qcS6FQmD0vX76c2dnZjImJobu7+2PX2L17dz58+JAkOWnSpNYF4KeffiJJnj592uz4zp49m3PmzGFKSgo7depk9s7YsWMZFRXF3r17C3WjRo1ifHw8p06d+qxfiTY2NnR3d6eNjc0T+4lEIq5Zs4YkmZub23oA9OnThzU1NaypqeHIkSPN2rp27cqEhAQmJSXRyclJ4NXjx4/zyJEjTE5O5tixY584/uDBg3nkyBEmJCRw7dq17Ny5c7Ol09ChQ1lVVUWSZsC3CIBt27aRJH/77bdn6t+rVy8eOHCAW7duFcTXoxfdo892dnaMj4/nnj17OGbMGM6fP592dnbNBkClUvHPP/9sxKrNBsDNzY1//PEHSfKDDz5okeo5a9Ysbt26lcHBwZRIJGYisHfv3uzSpUuL1Vt7e3umpqaSJA8ePNhyAPz9/VlTU0OSzyNfmyx+fn7MzMxkRkYG7e3t+SJsEWtra54/f54kH3vJCmL9WVTFDh06QKlUIiMjo8Vm6q+//ooRI0ZAIpGgsrLyhVmhFhYWAICqqqqWmcMWFhZwdXUFAJw4caJVFve0RbWG38HS0hIAkJ2d3TKPkEgkgq2tLQDg2rVr/ye8PFZWVrCysgIAZGZmttwl1mCCNlhorzq1adMGdnZ2AIC0tLSWA/Cy3FOtRV26dEGnTp0AAAMGDIBIJGo+ACRRW1sr/P/KOznFYkyfPl04tQkJCYiKioJSqWweACaTCTU1NQAAqVT6ygMQFhaGwMBAhIeHY+LEiSgsLERISAgCAwOb5xD55JNPBAOooKDghcjt1irr16+nyWRifHw8ZTIZAbCoqIgkuWDBgudThKytrRkbG0uSNBqNLCgoIEmeOHHildq0SCSiSqViQkICSTI9PV3YPADeuXOHJDllypRnB8DDw4OZmZkkyerqan7++ee0tLQU7IFjx47R39+/kcn6MotcLqevry/Dw8NZW1tLjUbDmJgYM/UaAJ2dnalWq7l9+3aKxeKnAyCVSrljxw6SZGVlJf39/c3QXrt2LUmytraW58+f54oVK+jp6fnCN2xpacm+ffty2rRp3LZtG8+dO0eNRkOSjImJoa+vb6N3QkNDeebMGep0OkZHRzcCp0kA2rRpI/DNwIEDBTeWt7e3YOYGBATw/v37gltMrVbz0qVLDAsLY/v27Vtlw3369OHMmTMZExPDK1eusLy8nNXV1dTr9SRJvV7PLVu2sEOHDk1+2dmzZwsAkeTGjRufLS7g5OSE0tJS6HQ6nD59Gt27d4enpycA4OHDhxg6dCiuXLkCAJgzZw4WLlwINzc3WFlZQSz+X826pKQESUlJyMvLe6wOQVLQ1y0tLeHk5IROnTqhR48ecHFxaSSJ6urqoNfrkZWVhZiYGBw8ePCxY48cORJxcXGoq6tDaWkpevbsCQA4c+YMgoKCzGyQRgA4ODigpKRE0KX/TjqdDosWLUJSUhLUajUkEglsbW3h5+cHLy8vtGvXDnZ2dnBwcIC1tTUUCgWsrKwglUohlUohFouFjRuNRuh0OtTV1UGr1aK2thYajQYajQZarRZarRZqtRo3btzA77//jtTUVEEkW1paCoA3AGpnZ4dhw4Zhx44d0Gq1GDNmDO7cuYPdu3fjww8/hFQqRVFREaZOnYqzZ882DYBYLMbs2bMRFhaGv/76C2q1GmPGjAEA5OfnQ6VSAQAKCgqg0+kglUrxzTffYNWqVWa6uK2tLRQKRSMALC0tIRKJYGFhAYPBIADQ8LcBDK1Wi7q6uiY/wvvvv48ZM2YIQD4KwKBBg5CVlYWgoCDcunVLaP/444+xc+dOyOVypKenY+zYsSgrK2taD7C0tKS1tTVdXV1ZXFxMkoyIiGBkZKTAU2VlZayvr38qj7V2GTBgwBNjiSdPnmzkZ5RKpdy/fz+NRiNJ8sKFC7S1tX2yHuDg4MCcnByS5KFDhwiA8+fPF5yij960JSUlJMlFixa1SJ5LJBJaWVlRLpdToVBQoVBQLpebOWEWLVok6CZVVVWsqqpidXW1AMDatWvNxnVxceHZs2dJkiaTiZcvXzbzVDfpD2jXrh3i4uLg4eGBQ4cO4aOPPjKzrLy9vTF37lykpqYiOjoa0dHROHDgACIjI6FQKLBjxw5UVFQ8UWVVKpXo2rUr3Nzc4OLiAnd3d7Rt2xb29vZQKpWQyWQQi8VQq9XIzMxEYWEh
DAYDhg4dKpi5kyZNgo+PD6KiooRL1dvbW5hj3Lhx2LVrFzp06ACNRoP9+/cjNDQUBoPh8aqwlZUVExMTSZLbt2+nVCo1ay8sLCRJ1tXVMS8vj3v37uXMmTO5fPly/vLLLyTJw4cP88cff+Tu3bu5efNmrlixgm5ubkJkafHixTx//jzz8vLM2Oh56M6dO/z000+FQEgDNfgaHw3d3bx5k+PGjXs2TbDBmbhx48Ym5evo0aPNJjQajdTpdNRoNNTpdDSZTIL/sOHYGQwGlpeXMyoqivfv32/2pv9ODTrBo3OJxWLu37/fLFj7CL8/GYBTp04JAcaUlBSGhISwffv2tLa2NgtfLViwwGyTrwrNmjWLGzZsEJ6vXr36fIGR7777rsmB09PTGR4eznHjxgnhLy8vL0ZFRTErK+u/vvFLly5x6tSp/M9//iOo8OXl5SRp9uGeCoBSqWRgYCCjoqKYnZ3d5GS3bt3i0aNHGRYWxu7du9Pe3p7Lli0zi9O/DLp27Rp37drFd999l05OTpw7dy6NRiONRiMnTJjAH374gSQZEBDw/HEBsVhMhUJBb29vgSVOnz7NlJQUQeQYDAZWV1czJyeHERER7NmzJz09Pblw4UKmpKS06mb1ej0TExO5evVq+vv7s23btrS1tRUuaH9/fyEU1hC42bRpE0kyPDy8+YERkUgkbPbReODbb7/NsrKyRgtNSkriO++8Q5lMRplMRm9vb4aGhvLrr7/mpUuXWFxczPLyclZWVgqlvLycZWVlLC0t5f3793n58mUmJSUxNjaW69ato5+f31OjVnl5eY2cHitXrqTJZOLx48ebHxgxmUwgKRgrDXTq1ClkZWXhjTfegF6vR0JCAhwdHWEymbBnzx44ODggLS0NSUlJuHjxIs6ePYs7d+4ISUxt2rSBXC6HwWAQ9P/mkLW1NQ4fPgyVSoXIyEhs3brVzDNsYWEBuVzeshyh69evkyQ3bdpkdqGMGjWKNTU1NJlMXLp0qRDMfO2111hcXMy9e/cKcfoHDx4wPT2d33//PcPCwjho0KBWUYvj4uIEUff3gGt8fDxJ8ttvv21ZbLDBOXLv3r1GOra7uzvT0tIEkbN582bGx8ezvLycnp6e/Pnnn5vkZ7VazZKSEh47dozBwcFCSP15yrJly2g0Gpmdnd1IWevRowdv375Nkpw3b17LAOjXr5+ZnG2qz9y5c1lQUMC6urpmKzm3bt3ixo0b6efnR1tbW8rl8sfmA/Xv35+FhYVUq9VUKpWN2hvsBZJ0dXVtWaKkVCrFoUOHMGHCBBgMBvTv3x+XL19u0h/fv39/eHl5YeTIkejatStsbW0hkUhgYWEBo9GI+vp6aLVaVFVVIS8vDwqFAt26dUO3bt3MeLW6uhrJyclISUlBdnY2cnNzkZOTIzhADh48iEmTJmHkyJFITEw0W4ePjw8SExNha2uLY8eOYfz48S3PExwyZAgrKioEvfpZfIBubm7s0aMHvby82LdvX/bu3Zuenp5UqVRmaS4qlYoBAQFcunQpExMTaTKZzE6GTqfj9evXefLkSa5bt46hoaEkyVWrVjWa09fXV5AIarXaLGepxRkikZGRwuKuXr36rIM/lzmsVCrp7OzMoKAgnjhx4rF5w6dOnWp06YWEhAgfiWSTmWktTpI6d+6cMEF5eTkDAgJeuGs8MDCQR44cYUlJCWtra5mfny/wtY2NDceMGSO48ElSo9Fw9erVLy5TdN++fWYZoufPn2dwcDD79u1rFpB4ESUgIIBvvfUW/fz8uH79eubm5grrqK2tZWpqKoOCgl58quzo0aPNTE6SzM/P55kzZxgREcHx48ezXbt2rbZxe3t7BgYGMjo6mhcuXDCzRCsqKrht2zaOGjXqqSl0rZouL5PJ0LZtW8yfPx8hISFC9NVkMkGn00Gv1+Phw4fIyMhAdnY2cnJy8ODBAxQWFuLu3bvQ6XSPHdvR0RGDBg3C0KFD8eabb6Jr166QSCRmkiIjIwMRERE4deoUtFpts1LlWyVdvoEmT56MpUuXwsPDAzKZzCyvvymqr6/HvXv3UFpaivr6eiiVSjg7Owte50ep4fcGpaWliI2NxVdffYW7d++2Ti5RawHQQK+//jqGDx8Ob29vdOzYES4uLnB2dhbSbJ4lHF9eXo6ysjI8ePAA+fn5SEtLQ0pKCq5evdr6yVStDcCjZG9vLwDg4OAAZ2dnAQyZTCb8Pkij0aCyshKlpaWoqqpCRUUFSktLUVRUJCRnvLBsshcJwOOSrv6esmIymf5raTgvHYBXjf795ei/APzD6X8AnLiHel4uyPcAAAAASUVORK5CYII=' # noqa + + +class Burger(object): + + def __init__(self): + burger_png = base64.b64decode(BURGER_BASE64) + np_img = numpy.fromstring(burger_png, dtype=numpy.uint8) + self.burger_template = cv2.imdecode(np_img, cv2.IMREAD_COLOR) + + # We flood fill the burger template with "1,1,1" (BGR) so that we can + # remove the borders during rendering. + h, w = self.burger_template.shape[:2] + mask = numpy.zeros((h + 2, w + 2), numpy.uint8) + cv2.floodFill(self.burger_template, mask, (1, 1), (1, 1, 1)) + + random.seed() + + self.width = 0 + self.height = 0 + self.num_burgers = 0 + self.burger_list = [] + + def render_burger(self, width, height): + # The basic idea here is to render a number of burgers into a OpenCV mat, + # moving them on each successful iteration. So when the requested + # resolution changes (this includes the very first time we are called), + # we generate a random number of burgers, at random starting locations, + # moving at random speeds, and store that list of burgers in the object. + # We then render the burgers onto the mat and return it. 
On subsequent + # render_burger() method calls, we keep the same list of burgers from + # before, but move them according to their X and Y speed, "bouncing" them + # off of the side of the mat if they run into it. + class OneBurger(object): + + def __init__(self, x, y, x_inc, y_inc): + self.x = x + self.y = y + self.x_inc = x_inc + self.y_inc = y_inc + + if self.width != width or self.height != height: + num_burgers = random.randrange(2, 10) + width_max = width - self.burger_template.shape[1] - 1 + height_max = height - self.burger_template.shape[0] - 1 + for b in range(0, num_burgers): + x = random.randrange(0, width_max) + y = random.randrange(0, height_max) + x_inc = random.randrange(1, 3) + y_inc = random.randrange(1, 3) + self.burger_list.append(OneBurger(x, y, x_inc, y_inc)) + self.width = width + self.height = height + + # We want an OpenCV Mat with CV_8UC3, which is 3 channels + burger_buf = numpy.zeros((height, width, 3), numpy.uint8) + + # TODO(clalancette): This is the slow way to do this, in that we iterate + # over every pixel by hand, looking for the flood fill and thus whether + # we should render that pixel. However, I was not able to figure out the + # OpenCV python calls to do this in a nicer way, so we leave this for now. + for b in self.burger_list: + for y in range(0, self.burger_template.shape[0]): + for x in range(0, self.burger_template.shape[1]): + bitem = self.burger_template.item(y, x, 0) + gitem = self.burger_template.item(y, x, 1) + ritem = self.burger_template.item(y, x, 2) + if bitem != 1 or gitem != 1 or ritem != 1: + burger_buf.itemset((b.y + y, b.x + x, 0), bitem) + burger_buf.itemset((b.y + y, b.x + x, 1), gitem) + burger_buf.itemset((b.y + y, b.x + x, 2), ritem) + b.x += b.x_inc + b.y += b.y_inc + + # Bounce if needed + if b.x < 0 or b.x > (width - self.burger_template.shape[1] - 1): + b.x_inc *= -1 + b.x += 2 * b.x_inc + if b.y < 0 or b.y > (height - self.burger_template.shape[0] - 1): + b.y_inc *= -1 + b.y += 2 * b.y_inc + + return burger_buf diff --git a/image_tools_py/cam2image_py.py b/image_tools_py/cam2image_py.py new file mode 100644 index 000000000..d6990b811 --- /dev/null +++ b/image_tools_py/cam2image_py.py @@ -0,0 +1,195 @@ +# Copyright 2017 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
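+
+# cam2image_py: reads frames from a local camera (or generates moving "burger"
+# images with -b), publishes them on the "image" topic as sensor_msgs.msg.Image
+# using QoS settings taken from the command line, and listens on "flip_image"
+# (std_msgs.msg.Bool) to mirror the outgoing frames on request.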
+ +# System imports +import argparse +import sys + +# Local imports +import burger_py + +# OpenCV imports +import cv2 + +# ROS2 imports +import rclpy +from rclpy.qos import qos_profile_default, qos_profile_sensor_data +import sensor_msgs.msg +import std_msgs.msg + + +# Convert an OpenCV matrix encoding type to a string format recognized by +# sensor_msgs::Image +def mat2encoding(frame): + encoding = '' + + encodings = {1: 'mono', 3: 'bgr', 4: 'rgba'} + for channels, prefix in encodings.items(): + if frame.shape[2] == channels: + encoding += prefix + break + else: + raise ValueError('Unsupported frame shape %d' % (frame.shape[2])) + + types = {'uint8': '8', 'int16': '16'} + for dtype, num in types.items(): + if frame.dtype == dtype: + encoding += num + break + else: + raise ValueError('Unsupported frame type ' + frame.dtype) + + return encoding + + +# Convert an OpenCV matrix to a ROS Image message. +def convert_frame_to_message(frame, frame_id, msg): + msg.height = frame.shape[0] + msg.width = frame.shape[1] + msg.encoding = mat2encoding(frame) + msg.data = frame.data.tobytes() + msg.step = int(len(msg.data) / msg.height) + msg.header.frame_id = str(frame_id) + + +def main(args=None): + # Pass command-line arguments to rclpy. + rclpy.init(args=args) + + # Parse the command-line options. + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '-b', '--burger', dest='burger_mode', action='store_true', default=False, + help='Produce images of burgers rather than connecting to a camera') + parser.add_argument( + '-d', '--depth', dest='depth', action='store', default=qos_profile_default.depth, type=int, + help='Queue depth') + parser.add_argument( + '-f', '--frequency', dest='frequency', action='store', default=30, type=int, + help='Publish frequency in Hz') + parser.add_argument( + '-k', '--keep', dest='history_policy', action='store', default=qos_profile_default.history, + type=int, choices=[0, 1], + help='History QoS setting, 0 - keep last sample, 1 - keep all the samples') + parser.add_argument( + '-r', '--reliability', dest='reliability_policy', action='store', + default=qos_profile_default.reliability, type=int, choices=[0, 1], + help='Reliability QoS setting, 0 - best effort, 1 - reliable') + parser.add_argument( + '-s', '--show', dest='show_camera', action='store', default=0, type=int, choices=[0, 1], + help='Show the camera stream') + parser.add_argument( + '-x', '--width', dest='width', action='store', default=320, type=int, + help='Image width') + parser.add_argument( + '-y', '--height', dest='height', action='store', default=240, type=int, + help='Image height') + args = parser.parse_args() + + # Initialize a ROS 2 node to publish images read from the OpenCV interface to + # the camera. + node = rclpy.create_node('cam2imagepy') + + # Set the parameters of the quality of service profile. Initialize as the + # default profile and set the QoS parameters specified on the command line. + custom_camera_qos_profile = qos_profile_default + + # Depth represents how many messages to store in history when the history policy is KEEP_LAST + custom_camera_qos_profile.depth = args.depth + + # The reliability policy can be reliable, meaning that the underlying transport layer will try + # ensure that every message gets received in order, or best effort, meaning that the transport + # makes no guarantees about the order or reliability of delivery. 
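+    # For example, running cam2image_py with "-r 0 -k 0 -d 1" keeps only the newest
+    # frame and never retransmits it (a reasonable choice on a lossy link), while
+    # "-r 1 -k 1" asks the middleware to retain and retry every frame.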
+    custom_camera_qos_profile.reliability = args.reliability_policy
+
+    # The history policy determines how messages are saved until they are taken by the reader.
+    # KEEP_ALL saves all messages until they are taken.
+    # KEEP_LAST enforces a limit on the number of messages that are saved, specified by the "depth"
+    # parameter.
+    custom_camera_qos_profile.history = args.history_policy
+
+    # Create the image publisher with our custom QoS profile.
+    pub = node.create_publisher(
+        sensor_msgs.msg.Image, 'image',
+        qos_profile=custom_camera_qos_profile)
+
+    is_flipped = False
+
+    msg = sensor_msgs.msg.Image()
+    msg.is_bigendian = False
+
+    def flip_image_cb(msg):
+        nonlocal is_flipped
+
+        is_flipped = msg.data
+
+        output = 'on' if is_flipped else 'off'
+
+        print('Set flip mode to: ' + output)
+
+    custom_flip_qos_profile = qos_profile_sensor_data
+    custom_flip_qos_profile.depth = 10
+    node.create_subscription(
+        std_msgs.msg.Bool, 'flip_image', flip_image_cb, qos_profile=custom_flip_qos_profile)
+
+    if args.burger_mode:
+        burger_cap = burger_py.Burger()
+    else:
+        # Initialize OpenCV video capture stream. Always open device 0.
+        cam_cap = cv2.VideoCapture(0)
+
+        # Set the width and height based on command-line arguments.
+        cam_cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
+        cam_cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)
+        if not cam_cap.isOpened():
+            print('Could not open video stream')
+            sys.exit(1)
+
+    # Our main event loop will spin until the user presses CTRL-C to exit.
+    frame_number = 1
+    while rclpy.ok():
+        # Get the frame from the video capture.
+        frame = None
+        if args.burger_mode:
+            frame = burger_cap.render_burger(args.width, args.height)
+        else:
+            ret, frame = cam_cap.read()
+
+        # Check if the frame was grabbed correctly.
+        if frame is not None:
+            # Convert the frame to a ROS Image message.
+            if is_flipped:
+                # Flip the frame around the vertical axis if requested.
+                flipped_frame = cv2.flip(frame, 1)
+                convert_frame_to_message(flipped_frame, frame_number, msg)
+            else:
+                convert_frame_to_message(frame, frame_number, msg)
+
+            if args.show_camera == 1:
+                # Show the image in a window called 'cam2image_py', if requested.
+                cv2.imshow('cam2image_py', frame)
+                # Draw the image to the screen and wait 1 millisecond.
+                cv2.waitKey(1)
+
+            # Publish the image message and increment the frame_id.
+            print('Publishing image #%d' % (frame_number))
+            pub.publish(msg)
+            frame_number += 1
+
+        # Do some work in rclpy and wait for more to come in.
+        rclpy.spin_once(node, timeout_sec=1.0 / float(args.frequency))
+
+
+if __name__ == '__main__':
+    main()
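A note on the "flip_image" topic: this demo does not ship a tool for publishing to it,
but a few lines of rclpy are enough to toggle the flip mode. The sketch below mirrors
the publisher and QoS calls used in cam2image_py.py above; the node name is invented
for illustration and the snippet is not part of the package:

  import rclpy
  from rclpy.qos import qos_profile_default
  import std_msgs.msg

  rclpy.init()
  node = rclpy.create_node('flip_toggle_sketch')
  pub = node.create_publisher(
      std_msgs.msg.Bool, 'flip_image', qos_profile=qos_profile_default)

  msg = std_msgs.msg.Bool()
  msg.data = True  # True mirrors the camera frames; False restores them
  pub.publish(msg)
  # Give rclpy a moment to push the message out before shutting down.
  rclpy.spin_once(node, timeout_sec=0.5)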
diff --git a/image_tools_py/package.xml b/image_tools_py/package.xml
new file mode 100644
index 000000000..f5ba062b6
--- /dev/null
+++ b/image_tools_py/package.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>image_tools_py</name>
+  <version>0.0.0</version>
+  <description>Python tools to capture/play back images to/from DDS subscriptions/publications.</description>
+  <maintainer email="clalancette@osrfoundation.org">Chris Lalancette</maintainer>
+  <license>Apache License 2.0</license>
+  <author>Chris Lalancette</author>
+
+  <exec_depend>python3-opencv</exec_depend>
+  <exec_depend>rclpy</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>ament_pep8</test_depend>
+  <test_depend>ament_pyflakes</test_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/image_tools_py/resource/image_tools_py b/image_tools_py/resource/image_tools_py
new file mode 100644
index 000000000..e69de29bb
diff --git a/image_tools_py/setup.cfg b/image_tools_py/setup.cfg
new file mode 100644
index 000000000..3778bb622
--- /dev/null
+++ b/image_tools_py/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script-dir=$base/lib/image_tools_py
+[install]
+install-scripts=$base/lib/image_tools_py
diff --git a/image_tools_py/setup.py b/image_tools_py/setup.py
new file mode 100644
index 000000000..6a9f95949
--- /dev/null
+++ b/image_tools_py/setup.py
@@ -0,0 +1,41 @@
+from setuptools import setup
+
+package_name = 'image_tools_py'
+
+setup(
+    name=package_name,
+    version='0.0.0',
+    packages=[],
+    py_modules=[
+        'burger_py',
+        'cam2image_py',
+        'showimage_py',
+    ],
+    data_files=[
+        ('share/ament_index/resource_index/packages',
+            ['resource/' + package_name]),
+        ('share/' + package_name, ['package.xml']),
+    ],
+    install_requires=['setuptools'],
+    zip_safe=True,
+    author='Chris Lalancette',
+    author_email='clalancette@osrfoundation.org',
+    maintainer='Chris Lalancette',
+    maintainer_email='clalancette@osrfoundation.org',
+    keywords=['ROS'],
+    classifiers=[
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: Apache Software License',
+        'Programming Language :: Python',
+        'Topic :: Software Development',
+    ],
+    description='Python tools to capture/play back images to/from DDS subscriptions/publications.',
+    license='Apache License, Version 2.0',
+    tests_require=['pytest'],
+    entry_points={
+        'console_scripts': [
+            'cam2image_py = cam2image_py:main',
+            'showimage_py = showimage_py:main',
+        ],
+    },
+)
diff --git a/image_tools_py/showimage_py.py b/image_tools_py/showimage_py.py
new file mode 100644
index 000000000..7c2aeeaa4
--- /dev/null
+++ b/image_tools_py/showimage_py.py
@@ -0,0 +1,123 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# System imports
+import argparse
+
+# OpenCV imports
+import cv2
+
+# Numpy imports
+import numpy
+
+# ROS2 imports
+import rclpy
+from rclpy.qos import qos_profile_default
+import sensor_msgs.msg
+
+
+# Convert a ROS Image encoding string into a numpy dtype and channel count.
+def encoding2mat(encoding):
+    encodings = {'mono': 1, 'bgr': 3, 'rgba': 4}
+    for prefix, channels in encodings.items():
+        if encoding.startswith(prefix):
+            break
+    else:
+        raise ValueError('Unsupported encoding ' + encoding)
+
+    # Whatever follows the channel prefix is the bit depth ('8' or '16').
+    types = {'8': 'uint8', '16': 'int16'}
+    for depth, dtype in types.items():
+        if encoding[len(prefix):] == depth:
+            break
+    else:
+        raise ValueError('Unsupported encoding ' + encoding)
+
+    return dtype, channels
+
+
+def main(args=None):
+    # Pass command-line arguments to rclpy.
+    rclpy.init(args=args)
+
+    # Parse the command-line options.
+ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '-d', '--depth', dest='depth', action='store', default=qos_profile_default.depth, type=int, + help='Queue depth') + parser.add_argument( + '-k', '--keep', dest='history_policy', action='store', default=qos_profile_default.history, + type=int, choices=[0, 1], + help='History QoS setting, 0 - keep last sample, 1 - keep all the samples') + parser.add_argument( + '-r', '--reliability', dest='reliability_policy', action='store', + default=qos_profile_default.reliability, type=int, choices=[0, 1], + help='Reliability QoS setting, 0 - best effort, 1 - reliable') + parser.add_argument( + '-s', '--show', dest='show_camera', action='store', default=1, type=int, choices=[0, 1], + help='Show the camera stream') + args = parser.parse_args() + + if args.show_camera == 1: + cv2.namedWindow('showimage_py') + + # Initialize a ROS 2 node to subscribe to images read from the OpenCV interface to + # the camera. + node = rclpy.create_node('showimagepy') + + custom_qos_profile = qos_profile_default + + # Depth represents how many messages to store in history when the history policy is KEEP_LAST. + custom_qos_profile.depth = args.depth + + # The reliability policy can be reliable, meaning that the underlying transport layer will try + # ensure that every message gets received in order, or best effort, meaning that the transport + # makes no guarantees about the order or reliability of delivery. + custom_qos_profile.reliability = args.reliability_policy + + # The history policy determines how messages are saved until the message is taken by the reader + # KEEP_ALL saves all messages until they are taken. + # KEEP_LAST enforces a limit on the number of messages that are saved, specified by the "depth" + # parameter. + custom_qos_profile.history = args.history_policy + + def image_cb(msg): + print('Received image #' + msg.header.frame_id) + + if args.show_camera: + dtype, n_channels = encoding2mat(msg.encoding) + dtype = numpy.dtype(dtype) + dtype = dtype.newbyteorder('>' if msg.is_bigendian else '<') + if n_channels == 1: + frame = numpy.ndarray( + shape=(msg.height, msg.width), + dtype=dtype, + buffer=bytes(msg.data)) + else: + frame = numpy.ndarray( + shape=(msg.height, msg.width, n_channels), + dtype=dtype, + buffer=bytes(msg.data)) + + cv2.imshow('showimage_py', frame) + # Draw the image to the screen and wait 1 millisecond + cv2.waitKey(1) + + node.create_subscription( + sensor_msgs.msg.Image, 'image', image_cb, qos_profile=custom_qos_profile) + + while rclpy.ok(): + rclpy.spin_once(node) + + +if __name__ == '__main__': + main() diff --git a/image_tools_py/test/test_copyright.py b/image_tools_py/test/test_copyright.py new file mode 100644 index 000000000..06965244d --- /dev/null +++ b/image_tools_py/test/test_copyright.py @@ -0,0 +1,23 @@ +# Copyright 2017 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
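+
+# This test simply runs the ament_copyright linter over the package sources and
+# fails if any file is missing the expected copyright/license header.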
+ +from ament_copyright.main import main +import pytest + + +@pytest.mark.copyright +@pytest.mark.linter +def test_copyright(): + rc = main(argv=['.']) + assert rc == 0, 'Found errors' diff --git a/image_tools_py/test/test_pep257.py b/image_tools_py/test/test_pep257.py new file mode 100644 index 000000000..c0b87bd81 --- /dev/null +++ b/image_tools_py/test/test_pep257.py @@ -0,0 +1,23 @@ +# Copyright 2017 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ament_pep257.main import main +import pytest + + +@pytest.mark.linter +@pytest.mark.pep257 +def test_pep257(): + rc = main(argv=['.']) + assert rc == 0, 'Found code style errors / warnings'
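A closing note on performance: the TODO in burger_py.py's render_burger() points out
that copying the burger template pixel-by-pixel is slow. One possible NumPy-based
alternative, shown here only as an untested sketch (the helper name and its arguments
are illustrative, not part of the package), is to compute a boolean mask of the
non-background pixels once and copy each burger with a single masked assignment:

  import numpy

  def blit_burgers(burger_buf, burger_template, burger_list):
      h, w = burger_template.shape[:2]
      # True wherever the template differs from the flood-filled (1, 1, 1) background.
      mask = numpy.any(burger_template != 1, axis=2)
      for b in burger_list:
          # Copy only the masked (burger) pixels into the view at this burger's position.
          region = burger_buf[b.y:b.y + h, b.x:b.x + w]
          region[mask] = burger_template[mask]
      return burger_buf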