Skip to content

Commit c8c91a4

Browse files
authored
Initial TurtleBot subscriber demo (#1523)
* Initial TurtleBot subscriber demo * Standard example arg-parsing * Simplify TF lookups by storing a map * Use qualified calls for rerun_urdf for clarity * Add comment about why we're patching points.fields * Also log scalars for linear and angular velocity * Links to new github issues
1 parent 3566bc9 commit c8c91a4

File tree

5 files changed

+425
-1
lines changed

5 files changed

+425
-1
lines changed

.mypy.ini

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[mypy]
22
files = rerun_py/rerun_sdk/rerun, rerun_py/tests, examples/python
3-
exclude = examples/python/objectron/proto
3+
exclude = examples/python/objectron/proto|examples/python/ros
44
namespace_packages = True
55
show_error_codes = True
66
strict = True

examples/python/ros/README.md

+47
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
# Overview
2+
3+
A minimal example of creating a ROS node that subscribes to topics and converts the messages to rerun log calls.
4+
5+
The solution here is mostly a toy example to show how ROS concepts can be mapped to Rerun. For more information on
6+
future improved ROS support, see the tracking issue: [#1537](https://github.com/rerun-io/rerun/issues/1537)
7+
8+
NOTE: Unlike many of the other examples, this example requires a system installation of ROS
9+
in addition to the packages from requirements.txt.
10+
11+
# Dependencies
12+
13+
This example was developed and tested on top of [ROS2 Humble Hawksbill](https://docs.ros.org/en/humble/index.html)
14+
and the [turtlebot3 navigation example](https://navigation.ros.org/getting_started/index.html).
15+
16+
Installing ROS is outside the scope of this example, but you will need the equivalent of the following packages:
17+
```
18+
sudo apt install ros-humble-desktop gazebo ros-humble-navigation2 ros-humble-turtlebot3 ros-humble-turtlebot3-gazebo
19+
```
20+
21+
In addition to installing the dependencies from `requirements.txt` into a venv you will also need to source the
22+
ROS setup script:
23+
```
24+
source venv/bin/activate
25+
source /opt/ros/humble/setup.bash
26+
```
27+
28+
29+
# Running
30+
31+
First, in one terminal launch the nav2 turtlebot demo:
32+
```
33+
source /opt/ros/humble/setup.bash
34+
export TURTLEBOT3_MODEL=waffle
35+
export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/humble/share/turtlebot3_gazebo/models
36+
37+
ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False
38+
```
39+
40+
As described in the nav demo, use the rviz window to initialize the pose estimate and set a navigation goal.
41+
42+
You can now connect to the running ROS system by running:
43+
```
44+
python3 examples/python/ros/main.py
45+
```
46+
47+

examples/python/ros/main.py

+289
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,289 @@
1+
#!/usr/bin/env python3
2+
"""
3+
Simple example of a ROS node that republishes some common types to Rerun.
4+
5+
The solution here is mostly a toy example to show how ROS concepts can be
6+
mapped to Rerun. For more information on future improved ROS support,
7+
see the tracking issue: https://github.com/rerun-io/rerun/issues/1537
8+
9+
NOTE: Unlike many of the other examples, this example requires a system installation of ROS
10+
in addition to the packages from requirements.txt.
11+
"""
12+
13+
import argparse
14+
import sys
15+
16+
import numpy as np
17+
import rerun as rr
18+
19+
try:
20+
import cv_bridge
21+
import laser_geometry
22+
import rclpy
23+
import rerun_urdf
24+
import trimesh
25+
from image_geometry import PinholeCameraModel
26+
from nav_msgs.msg import Odometry
27+
from numpy.lib.recfunctions import structured_to_unstructured
28+
from rclpy.callback_groups import ReentrantCallbackGroup
29+
from rclpy.node import Node
30+
from rclpy.qos import QoSDurabilityPolicy, QoSProfile
31+
from rclpy.time import Duration, Time
32+
from sensor_msgs.msg import CameraInfo, Image, LaserScan, PointCloud2, PointField
33+
from sensor_msgs_py import point_cloud2
34+
from std_msgs.msg import String
35+
from tf2_ros import TransformException
36+
from tf2_ros.buffer import Buffer
37+
from tf2_ros.transform_listener import TransformListener
38+
39+
except ImportError:
40+
print(
41+
"""
42+
Could not import the required ROS2 packages.
43+
44+
Make sure you have installed ROS2 (https://docs.ros.org/en/humble/index.html)
45+
and sourced /opt/ros/humble/setup.bash
46+
47+
See: README.md for more details.
48+
"""
49+
)
50+
sys.exit(1)
51+
52+
53+
class TurtleSubscriber(Node):  # type: ignore[misc]
    """
    ROS node that subscribes to common TurtleBot topics and republishes them to Rerun.

    Camera info, odometry, images, point clouds, laser scans and the URDF are each
    handled by a dedicated callback; TF lookups are resolved client-side via
    `log_tf_as_rigid3` using the `path_to_frame` mapping.
    """

    def __init__(self) -> None:
        super().__init__("rr_turtlebot")

        # Used for subscribing to latching topics
        latching_qos = QoSProfile(depth=1, durability=QoSDurabilityPolicy.TRANSIENT_LOCAL)

        # Allow concurrent callbacks
        self.callback_group = ReentrantCallbackGroup()

        # Subscribe to TF topics
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer, self)

        # Define a mapping from rerun entity paths to TF frame names
        self.path_to_frame = {
            "map": "map",
            "map/points": "camera_depth_frame",
            "map/robot": "base_footprint",
            "map/robot/scan": "base_scan",
            "map/robot/camera": "camera_rgb_optical_frame",
            "map/robot/camera/points": "camera_depth_frame",
        }

        # Assorted helpers for data conversions
        self.model = PinholeCameraModel()
        self.cv_bridge = cv_bridge.CvBridge()
        self.laser_proj = laser_geometry.laser_geometry.LaserProjection()

        # Log a bounding box as a visual placeholder for the map
        # TODO(jleibs): Log the real map once [#1531](https://github.com/rerun-io/rerun/issues/1531) is merged
        rr.log_obb(
            "map/box",
            half_size=[6, 6, 2],
            position=[0, 0, 1],
            color=[255, 255, 255, 255],
            timeless=True,
        )

        # Subscriptions
        self.info_sub = self.create_subscription(
            CameraInfo,
            "/intel_realsense_r200_depth/camera_info",
            self.cam_info_callback,
            10,
            callback_group=self.callback_group,
        )

        self.odom_sub = self.create_subscription(
            Odometry,
            "/odom",
            self.odom_callback,
            10,
            callback_group=self.callback_group,
        )

        self.img_sub = self.create_subscription(
            Image,
            "/intel_realsense_r200_depth/image_raw",
            self.image_callback,
            10,
            callback_group=self.callback_group,
        )

        self.points_sub = self.create_subscription(
            PointCloud2,
            "/intel_realsense_r200_depth/points",
            self.points_callback,
            10,
            callback_group=self.callback_group,
        )

        self.scan_sub = self.create_subscription(
            LaserScan,
            "/scan",
            self.scan_callback,
            10,
            callback_group=self.callback_group,
        )

        # The urdf is published as latching
        self.urdf_sub = self.create_subscription(
            String,
            "/robot_description",
            self.urdf_callback,
            qos_profile=latching_qos,
            callback_group=self.callback_group,
        )

    def log_tf_as_rigid3(self, path: str, time: Time) -> None:
        """
        Helper to look up a transform with tf and log using `log_rigid3`.

        Note: we do the lookup on the client side instead of re-logging the raw transforms until
        Rerun has support for Derived Transforms [#1533](https://github.com/rerun-io/rerun/issues/1533)
        """
        # Get the parent path
        parent_path = path.rsplit("/", 1)[0]

        # Find the corresponding frames from the mapping
        child_frame = self.path_to_frame[path]
        parent_frame = self.path_to_frame[parent_path]

        # Do the TF lookup to get transform from child (source) -> parent (target)
        try:
            tf = self.tf_buffer.lookup_transform(parent_frame, child_frame, time, timeout=Duration(seconds=0.1))
            t = tf.transform.translation
            q = tf.transform.rotation
            rr.log_rigid3(path, parent_from_child=([t.x, t.y, t.z], [q.x, q.y, q.z, q.w]))
        except TransformException as ex:
            print("Failed to get transform: {}".format(ex))

    def cam_info_callback(self, info: CameraInfo) -> None:
        """Log a `CameraInfo` with `log_pinhole`."""
        time = Time.from_msg(info.header.stamp)
        rr.set_time_nanos("ros_time", time.nanoseconds)

        self.model.fromCameraInfo(info)

        rr.log_pinhole(
            "map/robot/camera/img",
            child_from_parent=self.model.intrinsicMatrix(),
            width=self.model.width,
            height=self.model.height,
        )

    def odom_callback(self, odom: Odometry) -> None:
        """Update transforms when odom is updated."""
        time = Time.from_msg(odom.header.stamp)
        rr.set_time_nanos("ros_time", time.nanoseconds)

        # Capture time-series data for the linear and angular velocities
        rr.log_scalar("odometry/vel", odom.twist.twist.linear.x)
        rr.log_scalar("odometry/ang_vel", odom.twist.twist.angular.z)

        # Update the robot pose itself via TF
        self.log_tf_as_rigid3("map/robot", time)

    def image_callback(self, img: Image) -> None:
        """Log an `Image` with `log_image` using `cv_bridge`."""
        time = Time.from_msg(img.header.stamp)
        rr.set_time_nanos("ros_time", time.nanoseconds)

        rr.log_image("map/robot/camera/img", self.cv_bridge.imgmsg_to_cv2(img))
        self.log_tf_as_rigid3("map/robot/camera", time)

    def points_callback(self, points: PointCloud2) -> None:
        """Log a `PointCloud2` with `log_points`."""
        time = Time.from_msg(points.header.stamp)
        rr.set_time_nanos("ros_time", time.nanoseconds)

        pts = point_cloud2.read_points(points, field_names=["x", "y", "z"], skip_nans=True)

        # The realsense driver exposes a float field called 'rgb', but the data is actually stored
        # as bytes within the payload (not a float at all). Patch points.fields to use the correct
        # r,g,b, offsets so we can extract them with read_points.
        points.fields = [
            PointField(name="r", offset=16, datatype=PointField.UINT8, count=1),
            PointField(name="g", offset=17, datatype=PointField.UINT8, count=1),
            PointField(name="b", offset=18, datatype=PointField.UINT8, count=1),
        ]

        colors = point_cloud2.read_points(points, field_names=["r", "g", "b"], skip_nans=True)

        pts = structured_to_unstructured(pts)
        # NOTE: the original had a duplicated assignment (`colors = colors = ...`); simplified.
        colors = structured_to_unstructured(colors)

        # Log points once rigidly under robot/camera/points. This is a robot-centric
        # view of the world.
        rr.log_points("map/robot/camera/points", positions=pts, colors=colors)
        self.log_tf_as_rigid3("map/robot/camera/points", time)

        # Log points a second time after transforming to the map frame. This is a map-centric
        # view of the world.
        #
        # Once Rerun supports fixed-frame aware transforms [#1522](https://github.com/rerun-io/rerun/issues/1522)
        # this will no longer be necessary.
        rr.log_points("map/points", positions=pts, colors=colors)
        self.log_tf_as_rigid3("map/points", time)

    def scan_callback(self, scan: LaserScan) -> None:
        """
        Log a LaserScan after transforming it to line-segments.

        Note: we do a client-side transformation of the LaserScan data into Rerun
        points / lines until Rerun has native support for LaserScan style projections:
        [#1534](https://github.com/rerun-io/rerun/issues/1534)
        """
        time = Time.from_msg(scan.header.stamp)
        rr.set_time_nanos("ros_time", time.nanoseconds)

        # Project the laser scan to a collection of points
        points = self.laser_proj.projectLaser(scan)
        pts = point_cloud2.read_points(points, field_names=["x", "y", "z"], skip_nans=True)
        pts = structured_to_unstructured(pts)

        # Turn every pt into a line-segment from the origin to the point.
        origin = (pts / np.linalg.norm(pts, axis=1).reshape(-1, 1)) * 0.3
        segs = np.hstack([origin, pts]).reshape(pts.shape[0] * 2, 3)

        rr.log_line_segments("map/robot/scan", segs, stroke_width=0.005)
        self.log_tf_as_rigid3("map/robot/scan", time)

    def urdf_callback(self, urdf_msg: String) -> None:
        """Log a URDF using `log_scene` from `rerun_urdf`."""
        urdf = rerun_urdf.load_urdf_from_msg(urdf_msg)

        # The turtlebot URDF appears to have scale set incorrectly for the camera-link
        # Although rviz loads it properly `yourdfpy` does not.
        orig, _ = urdf.scene.graph.get("camera_link")
        scale = trimesh.transformations.scale_matrix(0.00254)
        urdf.scene.graph.update(frame_to="camera_link", matrix=orig.dot(scale))
        scaled = urdf.scene.scaled(1.0)

        rerun_urdf.log_scene(scene=scaled, node=urdf.base_link, path="map/robot/urdf", timeless=True)
268+
269+
270+
def main() -> None:
    """Entry point: configure Rerun from CLI args, hand the rest to rclpy, and spin the node."""
    arg_parser = argparse.ArgumentParser(description="Simple example of a ROS node that republishes to Rerun.")
    rr.script_add_args(arg_parser)
    known_args, remaining_args = arg_parser.parse_known_args()
    rr.script_setup(known_args, "turtlebot_viz")

    # Anything argparse did not recognize is forwarded on to rclpy
    rclpy.init(args=remaining_args)

    node = TurtleSubscriber()

    # A MultiThreadedExecutor keeps blocking `lookup_transform` calls from starving other callbacks
    rclpy.spin(node, executor=rclpy.executors.MultiThreadedExecutor())

    node.destroy_node()
    rclpy.shutdown()


if __name__ == "__main__":
    main()

examples/python/ros/requirements.txt

+4
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
numpy
2+
opencv-python
3+
rerun-sdk
4+
yourdfpy

0 commit comments

Comments
 (0)