diff --git a/.env b/.env index 518ce83b3..d60b33832 100644 --- a/.env +++ b/.env @@ -1,6 +1,9 @@ +# This top-level .env file under AirStack/ defines variables that are propagated through docker-compose.yaml PROJECT_NAME="airstack" -PROJECT_VERSION="1.0.5" +PROJECT_VERSION="1.1.0" # can replace with your docker hub username PROJECT_DOCKER_REGISTRY="airlab-storage.andrew.cmu.edu:5001/shared" DEFAULT_ISAAC_SCENE="omniverse://airlab-storage.andrew.cmu.edu:8443/Projects/AirStack/fire_academy.scene.usd" PLAY_SIM_ON_START="true" +# the file under robot/docker/ that contains the robot's environment variables +ROBOT_ENV_FILE_NAME="robot.env" \ No newline at end of file diff --git a/.gitmodules b/.gitmodules index f51a3e648..f0f65c11e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -7,3 +7,6 @@ [submodule "ground_control_station/ros_ws/src/ros-gst-bridge"] path = ground_control_station/ros_ws/src/ros-gst-bridge url = https://github.com/BrettRD/ros-gst-bridge.git +[submodule "robot/ros_ws/src/autonomy/2_perception/macvo/macvo/src"] + path = robot/ros_ws/src/autonomy/2_perception/macvo/macvo/src + url = https://github.com/MAC-VO/MAC-VO.git diff --git a/docs/development/project_configuration.md b/docs/development/project_configuration.md index 0309a8688..d274c0adc 100644 --- a/docs/development/project_configuration.md +++ b/docs/development/project_configuration.md @@ -1,4 +1,12 @@ # Project Configuration The project as a whole can be configured using the `.env` file under the project root. -The variables in the `.env` file gets populated into the root `docker-compose.yml`. \ No newline at end of file +The variables in the `.env` file get propagated into the `docker-compose.yml` files. + + +The top-level env file is reproduced below: +```bash +--8<-- ".env" +``` + + diff --git a/docs/robot/common_topics.md b/docs/robot/common_topics.md deleted file mode 100644 index c18d5b8cc..000000000 --- a/docs/robot/common_topics.md +++ /dev/null @@ -1,11 +0,0 @@ - - - - -| Topic | Type | Description | -| -------------------------------| ------------------| ---------------------------------------------------------------------------------------------------------------------------| -| `/$ROBOT_NAME/odometry` | [nav_msgs/Odometry](https://docs.ros.org/en/rolling/p/nav_msgs/interfaces/msg/Odometry.html) | Best estimate of robot odometry -| `/$ROBOT_NAME/global_plan` | [nav_msgs/Path](https://docs.ros.org/en/rolling/p/nav_msgs/interfaces/msg/Path.html) | Current target global trajectory for the robot to follow. See [global planning](4_global/planning.md) for more details. - -## System Diagram -![AirStack System Diagram](airstack_system_diagram.png) \ No newline at end of file diff --git a/docs/robot/configuration/index.md b/docs/robot/configuration/index.md new file mode 100644 index 000000000..94c18d7eb --- /dev/null +++ b/docs/robot/configuration/index.md @@ -0,0 +1,2 @@ +# Configuration + diff --git a/docs/robot/index.md b/docs/robot/index.md index e04173434..bc7d17445 100644 --- a/docs/robot/index.md +++ b/docs/robot/index.md @@ -1,4 +1,48 @@ # Robot +## Directory Structure +Underneath `AirStack/robot`, there are these directories: +- `docker/`: Contains files related to building and launching the robot Docker container. +- `installation/`: Contains files related to installing the robot software on a physical robot (TODO). +- `ros_ws/`: Contains the ROS 2 workspace for the robot. + ## Launch Structure -Each high-level module has a `*_bringup` package that contains the launch files for that module. 
The launch files are located in the `launch` directory of the `*_bringup` package. The launch files are named `*.launch.(xml/yaml/py)` and can be launched with `ros2 launch _bringup .launch.(xml/yaml/py)`.
+Each high-level module under `ros_ws/` has a `[module]_bringup` package that contains the launch files for that module. The launch files are located in the `launch` directory of the `[module]_bringup` package. The launch files are named `*.launch.(xml/yaml/py)` and can be launched with `ros2 launch [module]_bringup [module].launch.(xml/yaml/py)`.
+
+At a high level, the launch files are organized as follows:
+
+```
+- robot_bringup/: robot.launch.xml
+    - autonomy_bringup/: autonomy.launch.xml
+        - interface_bringup/: interface.launch.xml
+        - sensors_bringup/: sensors.launch.xml
+        - perception_bringup/: perception.launch.xml
+        - local_bringup/: local.launch.xml
+        - global_bringup/: global.launch.xml
+        - behavior_bringup/: behavior.launch.xml
+```
+
+## Configuration
+
+### Desktop vs Jetson
+If you look at the `robot/docker/docker-compose.yaml` file, you'll see it contains two services: `robot` is meant for x86-64 desktop development, whereas `robot_l4t` is meant to run on NVIDIA Jetson devices. Both extend a base service in `robot/docker/robot-base-docker-compose.yaml`.
+
+### Environment Variables
+Environment variables are used to configure the robot Docker container. The top-level `AirStack/.env` file sets `ROBOT_ENV_FILE_NAME` (default: `robot.env`), which names the file under `robot/docker/` that holds the robot container's environment variables. The file that `ROBOT_ENV_FILE_NAME` points to is loaded into the container via `robot/docker/robot-base-docker-compose.yaml`.
+
+Environment variables can be used to trigger nodes to launch. For example, the `USE_MACVO` environment variable is checked by `perception.launch.xml` to determine whether to launch the `macvo` node.
+
+The file `robot.env` is reproduced below:
+```bash
+--8<-- "robot/docker/robot.env"
+```
+
+## Common Topics
+| Topic | Type | Description |
+| -------------------------------| ------------------| ---------------------------------------------------------------------------------------------------------------------------|
+| `/$ROBOT_NAME/odometry` | [nav_msgs/Odometry](https://docs.ros.org/en/rolling/p/nav_msgs/interfaces/msg/Odometry.html) | Best estimate of robot odometry |
+| `/$ROBOT_NAME/global_plan` | [nav_msgs/Path](https://docs.ros.org/en/rolling/p/nav_msgs/interfaces/msg/Path.html) | Current target global trajectory for the robot to follow. See [global planning](4_global/planning.md) for more details. |
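+
+As a quick sanity check, these topics can be inspected from inside the robot container. This is a sketch: it assumes the sim is up and the robot namespace is `robot_1`, as in the RViz config elsewhere in this PR.
+
+```bash
+# List the robot's namespaced topics, then measure the odometry publish rate
+ros2 topic list | grep robot_1
+ros2 topic hz /robot_1/odometry
+```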
+ +### Rough System Diagram +![AirStack System Diagram](airstack_system_diagram.png) \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 9fc0fe6cc..e3696f69b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -56,7 +56,6 @@ nav: - docs/development/frame_conventions.md - Robot: - docs/robot/index.md - - docs/robot/common_topics.md - Autonomy Modules: - Robot Interface: - docs/robot/autonomy/0_interface/index.md @@ -130,7 +129,7 @@ theme: - navigation.indexes - navigation.path - navigation.tabs - - navigation.expand + # - navigation.expand - navigation.footer - navigation.top - navigation.sections diff --git a/robot/docker/Dockerfile.robot b/robot/docker/Dockerfile.robot index b0c4f85d7..c3531959c 100644 --- a/robot/docker/Dockerfile.robot +++ b/robot/docker/Dockerfile.robot @@ -38,7 +38,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ RUN sudo add-apt-repository universe \ && curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg \ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" | sudo tee /etc/apt/sources.list.d/ros2.list > /dev/null \ - && apt-get update && apt-get install -y --no-install-recommends \ + && apt-get update && apt upgrade -y && apt-get install -y --no-install-recommends \ ros-humble-desktop \ python3-argcomplete \ && rm -rf /var/lib/apt/lists/* @@ -63,6 +63,7 @@ RUN apt update && apt install -y \ cmake build-essential \ less htop jq \ python3-pip \ + python3-rosdep \ tmux \ gdb @@ -78,16 +79,16 @@ RUN apt update -y && apt install -y \ ros-humble-domain-bridge \ libcgal-dev \ python3-colcon-common-extensions -RUN /opt/ros/humble/lib/mavros/install_geographiclib_datasets.sh +RUN /opt/ros/humble/lib/mavros/install_geographiclib_datasets.sh # Install Python dependencies RUN pip3 install \ empy \ future \ lxml \ - matplotlib \ - numpy \ + matplotlib==3.8.4 \ + numpy==1.24.0 \ pkgconfig \ psutil \ pygments \ @@ -98,8 +99,22 @@ RUN pip3 install \ setuptools \ six \ toml \ - scipy - + scipy \ + torch \ + torchvision \ + pypose \ + rich \ + tqdm \ + pillow \ + flow_vis \ + h5py \ + evo \ + tabulate \ + einops \ + timm==0.9.12 \ + rerun-sdk==0.17 \ + yacs \ + wandb # Override install newer openvdb 9.1.0 for compatibility with Ubuntu 22.04 https://bugs.launchpad.net/bugs/1970108 RUN apt remove -y libopenvdb*; \ @@ -124,18 +139,29 @@ EXPOSE 22 ARG REAL_ROBOT=false RUN if [ "$REAL_ROBOT" = "true" ]; then \ # Put commands here that should run for the real robot but not the sim - echo "REAL_ROBOT is true"; \ apt-get update && apt-get install -y libimath-dev; \ else \ # Put commands here that should be run for the sim but not the real robot - echo "REAL_ROBOT is false"; \ fi +# Downloading model weights for MACVO +WORKDIR /root/model_weights +RUN wget -r "https://github.com/MAC-VO/MAC-VO/releases/download/model/MACVO_FrontendCov.pth" && \ + mv /root/model_weights/github.com/MAC-VO/MAC-VO/releases/download/model/MACVO_FrontendCov.pth /root/model_weights/MACVO_FrontendCov.pth && \ + rm -rf /root/model_weights/github.com + +WORKDIR /root/ros_ws # Cleanup. 
Prevent people accidentally doing git commits as root in Docker RUN apt purge git -y \ && apt autoremove -y \ && apt clean -y \ && rm -rf /var/lib/apt/lists/* +# Install colcon, seems to be getting removed +RUN pip install -U colcon-common-extensions + +# Fixes for MACVO Integration +RUN pip install huggingface_hub +RUN pip uninstall matplotlib -y \ No newline at end of file diff --git a/robot/docker/robot-base-docker-compose.yaml b/robot/docker/robot-base-docker-compose.yaml index 394f4ee34..1f9fff6c9 100644 --- a/robot/docker/robot-base-docker-compose.yaml +++ b/robot/docker/robot-base-docker-compose.yaml @@ -10,6 +10,8 @@ services: environment: - DISPLAY - QT_X11_NO_MITSHM=1 + env_file: + - ${ROBOT_ENV_FILE_NAME} deploy: # let it use the GPU resources: diff --git a/robot/docker/robot.env b/robot/docker/robot.env new file mode 100644 index 000000000..fe9021d34 --- /dev/null +++ b/robot/docker/robot.env @@ -0,0 +1,2 @@ +# These become environment variables in the robot container +USE_MACVO="false" \ No newline at end of file diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/README.md b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/README.md new file mode 100644 index 000000000..1f17a4c74 --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/README.md @@ -0,0 +1,51 @@ + +# Camera Parameter Server + +## Summary + +The camera parameter server was designed to eliminate the need for multiple nodes to subscribe to each camera individually, reducing unnecessary subscribers and callbacks. Its sole purpose is to listen to the camera info topics, store relevant information about the cameras, and provide it on demand for other nodes to query. + +## Configuration + +The camera parameter server is currently configurable through a non-ROS configuration file. This file allows users to define a list of cameras, specifying their types and topic names. At the top level of the configuration, a base link name is provided to indicate the `tf` name for the robot's center. Additionally, a parameter called `camera_list` contains a list of dictionaries, with each dictionary representing an individual camera. Currently, two camera types are supported: monocular and stereo. + +## Parameters + +Below are the parameters needed for the meta level camera parameter server configuration, as well as the camera fields needed to specify individual camera types. + +### Meta Level Parameters + +|
 Parameter | Description |
+|----------------------------|---------------------------------------------------------------|
+| `base_link_frame_id` | The frame name of the base link, i.e. the center frame of the robot |
+| `camera_list` | A list of dictionaries that define each camera of the system |
+
+### Monocular Camera Parameters
+
+|
 Parameter | Description |
+|----------------------------|---------------------------------------------------------------|
+| `camera_name` | The name of the camera |
+| `camera_type` | The type of camera; `mono` for monocular |
+| `camera_info_sub_topic` | The info topic name for the camera, normally `camera_info` |
+| `camera_frame_id` | The frame name of the camera, used to look up its tf |
+
+### Stereo Camera Parameters
+
+|
 Parameter | Description |
+|----------------------------|---------------------------------------------------------------|
+| `camera_name` | The name of the camera |
+| `camera_type` | The type of camera; `stereo` for stereo |
+| `camera_info_sub_topic` | The info topic name for the camera, normally `camera_info` |
+| `left_camera_frame_id` | The frame name of the left camera, used to look up its tf |
+| `right_camera_frame_id` | The frame name of the right camera, used to look up its tf |
+
+## Services
+|
 Service | Type | Description |
+|----------------------------|----------------------------------------|-----------------------|
+| `~/get_camera_params` | sensor_interfaces/GetCameraParams | The service to get info about the desired cameras. It provides camera intrinsics, transform frame ids, and the baseline if the camera is a stereo pair |
+
+## Subscriptions
+|
 Topic 
| Type | Description +|----------------------------|----------------------------------------|-----------------------| +| `tf/` | tfMessage | Listens for the tf for the specified cameras | +| `~/camera_info` | sensor_msgs/CameraInfo | Listens for the info for specified cameras| \ No newline at end of file diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/camera_param_server/__init__.py b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/camera_param_server/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/camera_param_server/camera_param_server.py b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/camera_param_server/camera_param_server.py new file mode 100644 index 000000000..ce3dace32 --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/camera_param_server/camera_param_server.py @@ -0,0 +1,208 @@ +import rclpy +from rclpy.node import Node +import tf2_ros + +from sensor_msgs.msg import CameraInfo +from sensor_interfaces.srv import GetCameraParams + +import yaml +import os +import numpy as np + +class StereoCameraInfo: + def __init__(self, node, camera_name, camera_info_topic, left_camera_frame_id, right_camera_frame_id): + self.left_camera_info = CameraInfo() + self.right_camera_info = CameraInfo() + self.camera_type = 'stereo' + + self.tf_initialized = False + self.left_info_initialized = False + self.right_info_initialized = False + self.info_initialized = False + + left_info_topic_merged = camera_name + '/left/' + camera_info_topic + right_info_topic_merged = camera_name + '/right/' + camera_info_topic + self.left_camera_info_sub = node.create_subscription(CameraInfo, left_info_topic_merged, self.left_camera_info_callback, 10) + self.right_camera_info_sub = node.create_subscription(CameraInfo, right_info_topic_merged, self.right_camera_info_callback, 10) + + self.camera_name = camera_name + self.left_camera_frame_id = left_camera_frame_id + self.right_camera_frame_id = right_camera_frame_id + self.base_link_frame_id = node.base_link_frame_id + self.left_camera_transform_to_baselink = None + self.right_camera_transform_to_baselink = None + self.baseline = None + + def left_camera_info_callback(self, msg): + self.left_camera_info = msg + self.left_info_initialized = True + if self.left_info_initialized and self.right_info_initialized: + self.info_initialized = True + + def right_camera_info_callback(self, msg): + self.right_camera_info = msg + self.right_info_initialized = True + if self.left_info_initialized and self.right_info_initialized: + self.info_initialized = True + +class MonoCameraInfo: + def __init__(self, node, camera_name, camera_info_topic, camera_frame_id): + self.camera_info = CameraInfo() + self.camera_type = 'mono' + + self.info_initialized = False + + info_topic_merged = camera_name + '/' + camera_info_topic + self.camera_info_sub = node.create_subscription(CameraInfo, info_topic_merged, self.camera_info_callback, 10) + + self.camera_name = camera_name + self.camera_frame_id = camera_frame_id + self.base_link_frame_id = node.base_link_frame_id + self.camera_transform_to_baselink = None + + def camera_info_callback(self, msg): + self.camera_info = msg + self.info_initialized = True + +class CameraParamServer(Node): + def __init__(self): + super().__init__('cam_param_server') + + self.camera_dict = {} + self.base_link_frame_id = None + + self.tfs_initialized = False + self.info_initialized = False + self.server_initialized = False + + 
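# Load the camera layout from the YAML file named by the required
+        # 'camera_config' ROS parameter (see config/camera_config.yaml)
+        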
self.declare_parameter('camera_config', rclpy.parameter.Parameter.Type.STRING)
+        camera_config_file = self.get_parameter('camera_config').value
+        if not os.path.exists(camera_config_file):
+            self.get_logger().error('Camera configuration file not found: %s' % camera_config_file)
+            return
+
+        self.parse_camera_config(camera_config_file)
+
+        self.tf_buffer = tf2_ros.Buffer()
+        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer, self)
+        self.tf_checking_timer = self.create_timer(0.1, self.check_tf)
+
+    def parse_camera_config(self, config_file_path):
+        try:
+            config_file = self.load_config(config_file_path)
+            camera_list = config_file.get('camera_list')
+            self.base_link_frame_id = config_file.get('base_link_frame_id')
+        except Exception as e:
+            self.get_logger().error(f"Error parsing camera config file: {e}")
+            return  # bail out; camera_list is undefined if loading failed
+
+        for camera in camera_list:
+            camera_name = camera.get('camera_name')
+            if camera.get('camera_type') == 'stereo':
+                self.camera_dict[camera_name] = StereoCameraInfo(self, camera_name,
+                                                                 camera.get('camera_info_sub_topic'),
+                                                                 camera.get('left_camera_frame_id'),
+                                                                 camera.get('right_camera_frame_id'))
+            elif camera.get('camera_type') == 'mono':
+                self.camera_dict[camera_name] = MonoCameraInfo(self, camera_name, camera.get('camera_info_sub_topic'), camera.get('camera_frame_id'))
+            else:
+                self.get_logger().error('Invalid camera type: %s' % camera.get('camera_type'))
+
+    def load_config(self, file):
+        try:
+            with open(file, 'r') as file:
+                config = yaml.safe_load(file)
+            return config
+        except FileNotFoundError:
+            self.get_logger().error(f"Error: The file '{file}' does not exist.")
+            raise
+        except yaml.YAMLError as e:
+            self.get_logger().error(f"Error parsing YAML file '{file}': {e}")
+            raise
+
+    def check_tf(self):
+        if not self.tfs_initialized:
+            for camera in self.camera_dict.values():
+                if isinstance(camera, StereoCameraInfo):
+                    try:
+                        camera.left_camera_transform_to_baselink = self.tf_buffer.lookup_transform(camera.left_camera_frame_id, camera.base_link_frame_id, rclpy.time.Time())
+                        camera.right_camera_transform_to_baselink = self.tf_buffer.lookup_transform(camera.right_camera_frame_id, camera.base_link_frame_id, rclpy.time.Time())
+                        left_cam_location = np.array([camera.left_camera_transform_to_baselink.transform.translation.x, camera.left_camera_transform_to_baselink.transform.translation.y, camera.left_camera_transform_to_baselink.transform.translation.z])
+                        right_cam_location = np.array([camera.right_camera_transform_to_baselink.transform.translation.x, camera.right_camera_transform_to_baselink.transform.translation.y, camera.right_camera_transform_to_baselink.transform.translation.z])
+                        camera.baseline = np.linalg.norm(left_cam_location - right_cam_location)
+                        camera.tf_initialized = True
+                    except Exception as e:
+                        self.get_logger().error(f"Error looking up transform: {e}")
+                elif isinstance(camera, MonoCameraInfo):
+                    try:
+                        camera.camera_transform_to_baselink = self.tf_buffer.lookup_transform(camera.camera_frame_id, camera.base_link_frame_id, rclpy.time.Time())
+                        camera.tf_initialized = True  # mark mono cameras ready too, so the check below can pass
+                    except Exception as e:
+                        self.get_logger().error(f"Error looking up transform: {e}")
+
+            # MonoCameraInfo does not set tf_initialized in __init__, so default to False
+            if all(getattr(camera, 'tf_initialized', False) for camera in self.camera_dict.values()):
+                self.tfs_initialized = True
+                self.get_logger().info('Camera transforms initialized')
+
+        if not self.info_initialized:
+            if all(camera.info_initialized for camera in self.camera_dict.values()):
+                self.info_initialized = True
+                self.get_logger().info('Camera parameters initialized')
+
+        if self.tfs_initialized and self.info_initialized and not 
self.server_initialized:
+            self.camera_params_srv = self.create_service(GetCameraParams, 'get_camera_params', self.get_camera_params)
+            self.server_initialized = True
+            self.get_logger().info('Camera parameter server initialized')
+
+
+    def get_camera_params(self, request, response):
+        # Check if camera parameters are initialized
+        if self.tfs_initialized and self.info_initialized:
+            camera_name_list = request.camera_names
+            camera_type_list = request.camera_types
+            if len(camera_name_list) != len(camera_type_list):
+                self.get_logger().error('Camera name and type list length mismatch')
+                response.success = False
+                return response
+            for i, camera_name in enumerate(camera_name_list):
+                camera_type = camera_type_list[i]
+                response = self.get_camera_params_single(response, camera_name, camera_type)
+            return response
+        else:
+            self.get_logger().error('Camera parameters not initialized')
+            response.success = False
+            return response
+
+    def get_camera_params_single(self, response, incoming_camera_name, incoming_camera_type):
+        if incoming_camera_name in self.camera_dict:
+            camera = self.camera_dict[incoming_camera_name]
+            if camera.camera_type == incoming_camera_type:
+                if isinstance(camera, StereoCameraInfo):
+                    response.camera_frame_ids.append(camera.left_camera_frame_id)
+                    response.camera_infos.append(camera.left_camera_info)
+                    response.camera_frame_ids.append(camera.right_camera_frame_id)
+                    response.camera_infos.append(camera.right_camera_info)
+                    response.baselines.append(camera.baseline)
+                    response.success = True
+                elif isinstance(camera, MonoCameraInfo):
+                    # GetCameraParams.srv has no singular camera_info/transform fields,
+                    # so append to the array fields it defines
+                    response.camera_frame_ids.append(camera.camera_frame_id)
+                    response.camera_infos.append(camera.camera_info)
+                    response.success = True
+            else:
+                self.get_logger().error('Camera type mismatch: %s' % incoming_camera_name)
+                response.success = False
+            return response
+        else:
+            self.get_logger().error('Camera not found: %s' % incoming_camera_name)
+            response.success = False
+            return response
+
+
+def main(args=None):
+    rclpy.init(args=args)
+
+    camera_param_server = CameraParamServer()
+
+    rclpy.spin(camera_param_server)
+
+    camera_param_server.destroy_node()
+    rclpy.shutdown()
+
diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/config/camera_config.yaml b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/config/camera_config.yaml
new file mode 100644
index 000000000..629c4e9e7
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/config/camera_config.yaml
@@ -0,0 +1,7 @@
+base_link_frame_id: "base_link"
+camera_list:
+  - camera_name: "front_stereo"
+    camera_type: "stereo"
+    camera_info_sub_topic: "camera_info"
+    left_camera_frame_id: "left_camera" # not sure if this is needed because it should be standardized
+    right_camera_frame_id: "right_camera"
diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/launch/camera_param_server.launch.xml b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/launch/camera_param_server.launch.xml
new file mode 100644
index 000000000..288a50f0b
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/launch/camera_param_server.launch.xml
@@ -0,0 +1,5 @@
+
+
+
+
+
\ No newline at end of file
diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/package.xml b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/package.xml
new file mode 100644
index 000000000..f46cc6053
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/package.xml
@@ -0,0 +1,20 @@
+
+
+
+  camera_param_server
+  0.0.0
+  TODO: Package 
description + root + TODO: License declaration + + sensor_interfaces + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + + ament_python + + diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/resource/camera_param_server b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/resource/camera_param_server new file mode 100644 index 000000000..e69de29bb diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/setup.cfg b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/setup.cfg new file mode 100644 index 000000000..2e62fcd72 --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/camera_param_server +[install] +install_scripts=$base/lib/camera_param_server diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/setup.py b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/setup.py new file mode 100644 index 000000000..bad637f9d --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/setup.py @@ -0,0 +1,28 @@ +from setuptools import find_packages, setup + +package_name = 'camera_param_server' + +setup( + name=package_name, + version='0.0.0', + packages=find_packages(exclude=['test']), + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ('share/' + package_name + '/launch', ['launch/camera_param_server.launch.xml']), + ('share/' + package_name + '/config', ['config/camera_config.yaml']), + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='root', + maintainer_email='root@todo.todo', + description='TODO: Package description', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'camera_param_server = camera_param_server.camera_param_server:main', + ], + }, +) diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_copyright.py b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_copyright.py new file mode 100644 index 000000000..97a39196e --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_copyright.py @@ -0,0 +1,25 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ament_copyright.main import main +import pytest + + +# Remove the `skip` decorator once the source file(s) have a copyright header +@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.') +@pytest.mark.copyright +@pytest.mark.linter +def test_copyright(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found errors' diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_flake8.py b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_flake8.py new file mode 100644 index 000000000..27ee1078f --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_flake8.py @@ -0,0 +1,25 @@ +# Copyright 2017 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ament_flake8.main import main_with_errors +import pytest + + +@pytest.mark.flake8 +@pytest.mark.linter +def test_flake8(): + rc, errors = main_with_errors(argv=[]) + assert rc == 0, \ + 'Found %d code style errors / warnings:\n' % len(errors) + \ + '\n'.join(errors) diff --git a/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_pep257.py b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_pep257.py new file mode 100644 index 000000000..b234a3840 --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/camera_param_server/test/test_pep257.py @@ -0,0 +1,23 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ament_pep257.main import main +import pytest + + +@pytest.mark.linter +@pytest.mark.pep257 +def test_pep257(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found code style errors / warnings' diff --git a/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/CMakeLists.txt b/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/CMakeLists.txt new file mode 100644 index 000000000..ea4cbdf62 --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/CMakeLists.txt @@ -0,0 +1,20 @@ +cmake_minimum_required(VERSION 3.5) +project(sensor_interfaces) +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() +find_package(ament_cmake REQUIRED) +find_package(rosidl_default_generators REQUIRED) +find_package(sensor_msgs REQUIRED) + +rosidl_generate_interfaces(${PROJECT_NAME} + "srv/GetCameraParams.srv" + DEPENDENCIES sensor_msgs + ) + +ament_export_dependencies(rosidl_default_runtime) +ament_package() \ No newline at end of file diff --git a/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/package.xml b/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/package.xml new file mode 100644 index 000000000..c3df3349b --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/package.xml @@ -0,0 +1,24 @@ + + + + sensor_interfaces + 0.0.0 + TODO: Package description + user + TODO: License declaration + + ament_cmake + rosidl_default_generators + + rosidl_default_runtime + + rosidl_interface_packages + ament_lint_auto + ament_lint_common + + sensor_msgs + + + ament_cmake + + diff --git a/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/srv/GetCameraParams.srv b/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/srv/GetCameraParams.srv new file mode 100644 index 000000000..2972d8d83 --- /dev/null +++ b/robot/ros_ws/src/autonomy/1_sensors/sensor_interfaces/srv/GetCameraParams.srv @@ -0,0 +1,7 @@ +string[] camera_names +string[] camera_types +--- +bool success +string[] camera_frame_ids +sensor_msgs/CameraInfo[] camera_infos +float64[] baselines diff --git a/robot/ros_ws/src/autonomy/1_sensors/sensors_bringup/launch/sensors.launch.xml b/robot/ros_ws/src/autonomy/1_sensors/sensors_bringup/launch/sensors.launch.xml index b5bebe8eb..d59762074 100644 --- a/robot/ros_ws/src/autonomy/1_sensors/sensors_bringup/launch/sensors.launch.xml +++ b/robot/ros_ws/src/autonomy/1_sensors/sensors_bringup/launch/sensors.launch.xml @@ -1,4 +1,7 @@ - + + + + \ No newline at end of file diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/README.md b/robot/ros_ws/src/autonomy/2_perception/macvo/README.md new file mode 100644 index 000000000..f9b1db473 --- /dev/null +++ b/robot/ros_ws/src/autonomy/2_perception/macvo/README.md @@ -0,0 +1,51 @@ + +# MAC-VO + +## Summary + +Currently, the state estimation of our robot relies on [MAC-VO](https://mac-vo.github.io/), a learning-based stereo visual odometry algorithm. This is purely camera based, and does not rely on additional sensors. On initialization, the node will load the model weights, and then allocate the required memory to store the model on first inference. This process may take some time. Once this process is complete, the inference time should be able to run at around 3 Hz. Documentation on the MAC-VO model can be found [here](https://mac-vo.github.io/wiki/category/extend-mac-vo/) + +This node is also setup to retrieve camera info on node initialization. 
This includes the camera intrinsics and baseline of the desired camera, and is set up via a service call to the camera parameter server.
+
+The node outputs the pose estimates of the model, the feature points in 3D space used to estimate the pose, and a visualization image of the points projected onto the RGB image. **The pose is currently given in the perspective of the left camera frame.**
+
+## Configuration
+
+The wrapper currently used for interfacing with the non-ROS MAC-VO logic is modified from the one provided [here](https://github.com/MAC-VO/MAC-VO-ROS2). For our purposes, we wanted modularity in interfacing with the node, so there are now two configuration files:
+
+- `interface_config.yaml`: Specifies the desired camera name, the subscriber and publisher topics, and the size of the images fed through inference. This file was designed specifically for AirStack.
+- `model_config.yaml`: Sourced from the official MAC-VO ROS wrapper; defines the structure for creating the MAC-VO model. It also specifies the location of the model weights, currently stored at `/root/model_weights/MACVO_FrontendCov.pth` within the Docker container.
+
+## Parameters
+
+Below are the parameters for `interface_config.yaml`. To find out more about the parameters for `model_config.yaml`, please consult the [MAC-VO documentation](https://mac-vo.github.io/wiki/category/extend-mac-vo/).
+
+|
 Parameter | Description |
+|----------------------------|---------------------------------------------------------------|
+| `camera_name` | The name of the camera whose images the visual odometry should process |
+| `camera_param_server_client_topic` | Topic name for the camera parameter server |
+| `imageL_sub_topic` | Topic name for the left stereo image, appended to the camera name |
+| `imageR_sub_topic` | Topic name for the right stereo image, appended to the camera name |
+| `pose_pub_topic` | Topic name for the pose estimate output by the MAC-VO model |
+| `point_pub_topic` | Topic name for the point cloud of feature points, with covariances, used to estimate the pose |
+| `img_pub_topic` | Topic name for the debug visualization of the feature points over the RGB image |
+| `inference_dim_u` | The width of the images fed into the MAC-VO model, which affects inference rate |
+| `inference_dim_v` | The height of the images fed into the MAC-VO model, which affects inference rate |
+
+## Services
+|
 Service | Type | Description |
+|----------------------------|----------------------------------------|-----------------------|
+| `~/get_camera_params` | sensor_interfaces/GetCameraParams | A service client used to get info about the desired camera |
+
+## Subscriptions
+|
 Topic | Type | Description |
+|----------------------------|----------------------------------------|-----------------------|
+| `~/left/image_rect` | sensor_msgs/Image | The left RGB image from the stereo camera |
+| `~/right/image_rect` | sensor_msgs/Image | The right RGB image from the stereo camera |
+
+## Publications
+|
 Topic | Type | Description |
+|----------------------------|----------------------------------------|-----------------------|
+| `~/visual_odometry_pose` | geometry_msgs/PoseStamped | Outputs the pose estimate from the MAC-VO model |
+| `~/visual_odometry_points` | sensor_msgs/PointCloud | Outputs the point cloud of feature points, with covariances, used to estimate the pose |
+| `~/visual_odometry_img` | sensor_msgs/Image | Outputs the debug visualization of the feature points over the RGB image |
\ No newline at end of file
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/config/interface_config.yaml b/robot/ros_ws/src/autonomy/2_perception/macvo/config/interface_config.yaml
new file mode 100644
index 000000000..c1ff163ff
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/config/interface_config.yaml
@@ -0,0 +1,11 @@
+/**:
+  ros__parameters:
+    camera_name: "front_stereo"
+    camera_param_server_client_topic: "sensors/get_camera_params"
+    imageL_sub_topic: "left/image_rect"
+    imageR_sub_topic: "right/image_rect"
+    pose_pub_topic: "visual_odometry_pose"
+    point_pub_topic: "visual_odometry_points"
+    img_pub_topic: "visual_odometry_img"
+    inference_dim_u: 450
+    inference_dim_v: 450
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/config/model_config.yaml b/robot/ros_ws/src/autonomy/2_perception/macvo/config/model_config.yaml
new file mode 100644
index 000000000..453f38f46
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/config/model_config.yaml
@@ -0,0 +1,83 @@
+Common:
+  # Some configurations are shared across multiple modules in Odometry, so I write them here.
+  device: &device cuda
+  max_depth: &max_depth auto
+
+Odometry:
+  name: MACVO
+  args:
+    # Device directive to the VO system
+    # NOTE: the system may not follow this device config strictly since some modules,
+    # e.g. those relying on PWC-Net, only support running on a cuda device.
+    device: *device
+    edgewidth: 32
+    num_point: 200 # Upper bound of KPs in each frame
+
+    # Match covariance for keypoint on first observation (sub-pixel uncertainty
+    # caused by the quantized pixel)
+    match_cov_default: 0.25
+
+    # Profile the system using torch; generates a Chrome JSON trace file.
+ profile: false + + cov: + obs: + type: MatchCovariance + args: + device: *device + kernel_size: 31 + match_cov_default: 0.25 + min_depth_cov: 0.05 + min_flow_cov: 0.25 + + keypoint: + type: CovAwareSelector + args: + device: *device + kernel_size: 7 + mask_width: 32 + max_depth: *max_depth + max_depth_cov: 250.0 + max_match_cov: 100.0 + + frontend: + type: FlowFormerCovFrontend + args: + device: *device + weight: /root/model_weights/MACVO_FrontendCov.pth + use_jit: true + enforce_positive_disparity: false + dtype: fp32 + + motion: + type: StaticMotionModel + args: + + + outlier: + type: FilterCompose + args: + filter_args: + - type: CovarianceSanityFilter + args: + - type: SimpleDepthFilter + args: + min_depth: 0.05 + max_depth: *max_depth + - type: LikelyFrontOfCamFilter + args: + + postprocess: + type: DisplacementInterpolate + args: + + keyframe: + type: AllKeyframe + args: + + optimizer: + type: PyPoseTwoFramePGO + args: + device: cpu + vectorize: true + parallel: true \ No newline at end of file diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/config/rviz_macvo.rviz b/robot/ros_ws/src/autonomy/2_perception/macvo/config/rviz_macvo.rviz new file mode 100644 index 000000000..5c905f3d7 --- /dev/null +++ b/robot/ros_ws/src/autonomy/2_perception/macvo/config/rviz_macvo.rviz @@ -0,0 +1,204 @@ +Panels: + - Class: rviz_common/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /Status1 + - /PointCloud1 + Splitter Ratio: 0.5 + Tree Height: 725 + - Class: rviz_common/Selection + Name: Selection + - Class: rviz_common/Tool Properties + Expanded: + - /2D Goal Pose1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz_common/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz_common/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: "" +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz_default_plugins/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.029999999329447746 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Axes Length: 1 + Axes Radius: 0.10000000149011612 + Class: rviz_default_plugins/Pose + Color: 255; 25; 0 + Enabled: true + Head Length: 0.30000001192092896 + Head Radius: 0.10000000149011612 + Name: Pose + Shaft Length: 1 + Shaft Radius: 0.05000000074505806 + Shape: Arrow + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /robot_1/perception/visual_odometry_pose + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 4.518574237823486 + Min Value: -0.5837738513946533 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz_default_plugins/PointCloud + Color: 255; 255; 255 + Color Transformer: AxisColor + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud + Position Transformer: XYZ + Selectable: true + Size (Pixels): 3 + Size (m): 0.5 + Style: Points + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /robot_1/perception/visual_odometry_points + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: 
rviz_default_plugins/Image + Enabled: true + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /robot_1/perception/visual_odometry_img + Value: true + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: left_camera + Frame Rate: 30 + Name: root + Tools: + - Class: rviz_default_plugins/Interact + Hide Inactive Objects: true + - Class: rviz_default_plugins/MoveCamera + - Class: rviz_default_plugins/Select + - Class: rviz_default_plugins/FocusCamera + - Class: rviz_default_plugins/Measure + Line color: 128; 128; 0 + - Class: rviz_default_plugins/SetInitialPose + Covariance x: 0.25 + Covariance y: 0.25 + Covariance yaw: 0.06853891909122467 + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /initialpose + - Class: rviz_default_plugins/SetGoal + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /goal_pose + - Class: rviz_default_plugins/PublishPoint + Single click: true + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /clicked_point + Transformation: + Current: + Class: rviz_default_plugins/TF + Value: true + Views: + Current: + Class: rviz_default_plugins/Orbit + Distance: 24.403091430664062 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: 0 + Y: 0 + Z: 0 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.3003981113433838 + Target Frame: + Value: Orbit (rviz) + Yaw: 2.8553879261016846 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 1016 + Hide Left Dock: false + Hide Right Dock: false + Image: + collapsed: false + QMainWindow State: 
000000ff00000000fd0000000400000000000001560000035efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afc0000003b0000035e000000c700fffffffa000000010100000002fb0000000a0049006d0061006700650000000000ffffffff0000000000000000fb000000100044006900730070006c0061007900730100000000000001560000015600fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000002760000035efc0200000005fb0000000a0049006d006100670065010000003b0000035e0000002800fffffffb0000000a0049006d006100670065010000003b000003790000000000000000fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b00000379000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073a0000003efc0100000002fb0000000800540069006d006501000000000000073a0000025300fffffffb0000000800540069006d00650100000000000004500000000000000000000003620000035e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: false + Width: 1850 + X: 70 + Y: 27 diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/launch/macvo.launch.xml b/robot/ros_ws/src/autonomy/2_perception/macvo/launch/macvo.launch.xml new file mode 100644 index 000000000..287b11934 --- /dev/null +++ b/robot/ros_ws/src/autonomy/2_perception/macvo/launch/macvo.launch.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/MessageFactory.py b/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/MessageFactory.py new file mode 100644 index 000000000..d0c514d3e --- /dev/null +++ b/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/MessageFactory.py @@ -0,0 +1,175 @@ +import std_msgs.msg as std_msgs +import sensor_msgs.msg as sensor_msgs +import geometry_msgs.msg as geometry_msgs +from builtin_interfaces.msg import Time + +import sys +import torch +import pypose as pp +import numpy as np + +_name_to_dtypes = { + "rgb8": (np.uint8, 3), + "rgba8": (np.uint8, 4), + "rgb16": (np.uint16, 3), + "rgba16": (np.uint16, 4), + "bgr8": (np.uint8, 3), + "bgra8": (np.uint8, 4), + "bgr16": (np.uint16, 3), + "bgra16": (np.uint16, 4), + "mono8": (np.uint8, 1), + "mono16": (np.uint16, 1), + + # for bayer image (based on cv_bridge.cpp) + "bayer_rggb8": (np.uint8, 1), + "bayer_bggr8": (np.uint8, 1), + "bayer_gbrg8": (np.uint8, 1), + "bayer_grbg8": (np.uint8, 1), + "bayer_rggb16": (np.uint16, 1), + "bayer_bggr16": (np.uint16, 1), + "bayer_gbrg16": (np.uint16, 1), + "bayer_grbg16": (np.uint16, 1), + + # OpenCV CvMat types + "8UC1": (np.uint8, 1), + "8UC2": (np.uint8, 2), + "8UC3": (np.uint8, 3), + "8UC4": (np.uint8, 4), + "8SC1": (np.int8, 1), + "8SC2": (np.int8, 2), + "8SC3": (np.int8, 3), + "8SC4": (np.int8, 4), + 
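# 16-, 32-, and 64-bit integer and float CvMat types follow the same (dtype, channels) pattern
+    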
"16UC1": (np.uint16, 1), + "16UC2": (np.uint16, 2), + "16UC3": (np.uint16, 3), + "16UC4": (np.uint16, 4), + "16SC1": (np.int16, 1), + "16SC2": (np.int16, 2), + "16SC3": (np.int16, 3), + "16SC4": (np.int16, 4), + "32SC1": (np.int32, 1), + "32SC2": (np.int32, 2), + "32SC3": (np.int32, 3), + "32SC4": (np.int32, 4), + "32FC1": (np.float32, 1), + "32FC2": (np.float32, 2), + "32FC3": (np.float32, 3), + "32FC4": (np.float32, 4), + "64FC1": (np.float64, 1), + "64FC2": (np.float64, 2), + "64FC3": (np.float64, 3), + "64FC4": (np.float64, 4) +} + + +def to_stamped_pose(pose: pp.LieTensor | torch.Tensor, frame_id: str, time: Time) -> geometry_msgs.PoseStamped: + pose_ = pose.detach().cpu() + out_msg = geometry_msgs.PoseStamped() + out_msg.header = std_msgs.Header() + out_msg.header.stamp = time + out_msg.header.frame_id = frame_id + + out_msg.pose.position.x = pose_[0].item() + out_msg.pose.position.y = pose_[1].item() + out_msg.pose.position.z = pose_[2].item() + + out_msg.pose.orientation.x = pose_[3].item() + out_msg.pose.orientation.y = pose_[4].item() + out_msg.pose.orientation.z = pose_[5].item() + out_msg.pose.orientation.w = pose_[6].item() + return out_msg + + +def from_image(msg: sensor_msgs.Image) -> np.ndarray: + if msg.encoding not in _name_to_dtypes: + raise KeyError(f"Unsupported image encoding {msg.encoding}") + + dtype_name, channel = _name_to_dtypes[msg.encoding] + dtype = np.dtype(dtype_name) + dtype = dtype.newbyteorder('>' if msg.is_bigendian else '<') + shape = (msg.height, msg.width, channel) + + data = np.frombuffer(msg.data, dtype=dtype).reshape(shape) + data.strides = (msg.step, dtype.itemsize * channel, dtype.itemsize) + return data + + +def to_image(arr: np.ndarray, frame_id: str, time: Time, encoding: str = "bgra8") -> sensor_msgs.Image: + if not encoding in _name_to_dtypes: + raise TypeError('Unrecognized encoding {}'.format(encoding)) + + im = sensor_msgs.Image(encoding=encoding) + + # extract width, height, and channels + dtype_class, exp_channels = _name_to_dtypes[encoding] + dtype = np.dtype(dtype_class) + if len(arr.shape) == 2: + im.height, im.width, channels = arr.shape + (1,) + elif len(arr.shape) == 3: + im.height, im.width, channels = arr.shape + else: + raise TypeError("Array must be two or three dimensional") + + # check type and channels + if exp_channels != channels: + raise TypeError("Array has {} channels, {} requires {}".format( + channels, encoding, exp_channels + )) + if dtype_class != arr.dtype.type: + raise TypeError("Array is {}, {} requires {}".format( + arr.dtype.type, encoding, dtype_class + )) + + # make the array contiguous in memory, as mostly required by the format + contig = np.ascontiguousarray(arr) + im.data = contig.tobytes() + im.step = contig.strides[0] + im.header.stamp = time + im.header.frame_id = frame_id + im.is_bigendian = ( + arr.dtype.byteorder == '>' or + arr.dtype.byteorder == '=' and sys.byteorder == 'big' + ) + + return im + + +def to_pointcloud(position: torch.Tensor, keypoints: torch.Tensor, colors: torch.Tensor, frame_id: str, time: Time) -> sensor_msgs.PointCloud: + """ + position should be a Nx3 pytorch Tensor (dtype=float) + keypoints should be a Nx2 pytorch Tensor (dtype=float) + """ + assert position.size(0) == keypoints.size(0) + + out_msg = sensor_msgs.PointCloud() + position_ = position.detach().cpu().numpy() + keypoints_ = keypoints.detach().cpu().numpy() + colors_ = colors.detach().cpu().numpy() + + out_msg.header = std_msgs.Header() + out_msg.header.stamp = time + out_msg.header.frame_id = frame_id + + 
out_msg.points = [ + geometry_msgs.Point32(x=float(position_[pt_idx, 0]), y=float(position_[pt_idx, 1]), z=float(position_[pt_idx, 2])) + for pt_idx in range(position.size(0)) + ] + out_msg.channels = [ + sensor_msgs.ChannelFloat32( + name="kp_u", values=keypoints_[..., 0].tolist() + ), + sensor_msgs.ChannelFloat32( + name="kp_v", values=keypoints_[..., 1].tolist() + ), + sensor_msgs.ChannelFloat32( + name="r" , values=colors_[..., 0].tolist() + ), + sensor_msgs.ChannelFloat32( + name="g" , values=colors_[..., 1].tolist() + ), + sensor_msgs.ChannelFloat32( + name="b" , values=colors_[..., 2].tolist() + ) + ] + + return out_msg \ No newline at end of file diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/__init__.py b/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/macvo.py b/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/macvo.py new file mode 100644 index 000000000..790c91466 --- /dev/null +++ b/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/macvo.py @@ -0,0 +1,262 @@ +import rclpy +import torch +import numpy as np +import cv2 +from cv_bridge import CvBridge + +from rclpy.node import Node +from sensor_msgs.msg import Image, PointCloud +from geometry_msgs.msg import PoseStamped +from message_filters import ApproximateTimeSynchronizer, Subscriber + +from pathlib import Path +from typing import TYPE_CHECKING +from torchvision.transforms.functional import center_crop, resize +import os, sys +import argparse + +from .MessageFactory import to_stamped_pose, from_image, to_pointcloud, to_image +from sensor_interfaces.srv import GetCameraParams + +# Add the src directory to the Python path +src_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src')) +sys.path.insert(0, src_path) +if TYPE_CHECKING: + # To make static type checker happy : ) + from src.Odometry.MACVO import MACVO + from src.DataLoader import SourceDataFrame, MetaInfo + from src.Utility.Config import load_config +else: + from Odometry.MACVO import MACVO + from DataLoader import SourceDataFrame, MetaInfo + from Utility.Config import load_config + +class MACVONode(Node): + def __init__(self): + super().__init__("macvo_node") + + self.bridge = None + self.time = None + self.prev_time = None + self.frame = None + self.camera_info = None + self.baseline = None + self.prev_frame = None + self.odometry = None + + # Declare subscriptions and publishers ---------------- + self.declare_parameter("imageL_sub_topic", rclpy.Parameter.Type.STRING) + self.declare_parameter("imageR_sub_topic", rclpy.Parameter.Type.STRING) + imageL_topic = self.get_parameter("imageL_sub_topic").get_parameter_value().string_value + imageR_topic = self.get_parameter("imageR_sub_topic").get_parameter_value().string_value + self.imageL_sub = Subscriber(self, Image, imageL_topic, qos_profile=1) + self.imageR_sub = Subscriber(self, Image, imageR_topic, qos_profile=1) + + self.sync_stereo = ApproximateTimeSynchronizer( + [self.imageL_sub, self.imageR_sub], queue_size=2, slop=0.1 + ) + self.sync_stereo.registerCallback(self.receive_frame) + + self.declare_parameter("pose_pub_topic", rclpy.Parameter.Type.STRING) + pose_topic = self.get_parameter("pose_pub_topic").get_parameter_value().string_value + self.pose_pipe = self.create_publisher(PoseStamped, pose_topic, qos_profile=1) + + self.declare_parameter("point_pub_topic", rclpy.Parameter.Type.STRING) + point_topic = 
self.get_parameter("point_pub_topic").get_parameter_value().string_value + if point_topic is not None: + self.point_pipe = self.create_publisher(PointCloud, point_topic, qos_profile=1) + else: + self.point_pipe = None + + self.declare_parameter("img_pub_topic", rclpy.Parameter.Type.STRING) + img_stream = self.get_parameter("img_pub_topic").get_parameter_value().string_value + if img_stream is not None: + self.img_pipes = self.create_publisher(Image, img_stream, qos_profile=1) + else: + self.img_pipes = None + + self.frame = "map" + + # Load the MACVO model ------------------------------------ + self.declare_parameter("camera_config", rclpy.Parameter.Type.STRING) + camera_config = self.get_parameter("camera_config").get_parameter_value().string_value + self.get_logger().info(f"Loading macvo model from {camera_config}, this might take a while...") + cfg, _ = load_config(Path(camera_config)) + self.frame_idx = 0 + self.odometry = MACVO.from_config(cfg) + self.declare_parameter("camera_name", rclpy.Parameter.Type.STRING) + self.camera_name = self.get_parameter("camera_name").get_parameter_value().string_value + + self.odometry.register_on_optimize_finish(self.publish_latest_pose) + self.odometry.register_on_optimize_finish(self.publish_latest_points) + # self.odometry.register_on_optimize_finish(self.publish_latest_stereo) + self.odometry.register_on_optimize_finish(self.publish_latest_matches) + self.get_logger().info(f"MACVO Model loaded successfully! Initializing MACVO node ...") + + self.declare_parameter("inference_dim_u", rclpy.Parameter.Type.INTEGER) + self.declare_parameter("inference_dim_v", rclpy.Parameter.Type.INTEGER) + u_dim = self.get_parameter("inference_dim_u").get_parameter_value().integer_value + v_dim = self.get_parameter("inference_dim_v").get_parameter_value().integer_value + + self.time , self.prev_time = None, None + self.meta = None + + # get camera params from the camera param server ---------------------------------------------------------- + self.declare_parameter("camera_param_server_client_topic", rclpy.Parameter.Type.STRING) + camera_param_server_topic = self.get_parameter("camera_param_server_client_topic").get_parameter_value().string_value + self.camera_param_client = self.create_client(GetCameraParams, camera_param_server_topic) + self.get_camera_params(camera_param_server_topic) + + self.bridge = CvBridge() + self.scale_u = float(self.camera_info.width / u_dim) + self.scale_v = float(self.camera_info.height / v_dim) + + self.rot_correction_matrix = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]) + self.rot_correction_matrix = np.eye(3) + + # self.get_logger().info(f"scale u: {self.scale_u}, scale v: {self.scale_v}, u_dim: {u_dim}, v_dim: {v_dim}, width: {self.camera_info.width}, height: {self.camera_info.height}") + + self.get_logger().info(f"MACVO Node initialized with camera config: {camera_config}") + + def get_camera_params(self, camera_param_server_topic, client_time_out: int = 1): + while True: + while not self.camera_param_client.wait_for_service(timeout_sec=client_time_out): + self.get_logger().error(f"Service {camera_param_server_topic} not available, waiting again...") + req = GetCameraParams.Request() + req.camera_names.append(self.camera_name) + req.camera_types.append("stereo") + future = self.camera_param_client.call_async(req) + rclpy.spin_until_future_complete(self, future) + if future.result() is not None: + self.camera_info = future.result().camera_infos[0] + self.baseline = future.result().baselines[0] + self.get_logger().info(f"Received 
+    def publish_latest_pose(self, system: MACVO):
+        pose = system.gmap.frames.pose[-1]
+        frame = self.frame
+        time = self.time if self.prev_time is None else self.prev_time
+        assert frame is not None and time is not None
+
+        out_msg = to_stamped_pose(pose, frame, time)
+
+        # Correction for the camera coordinate frame
+        out_msg.pose.position.x, out_msg.pose.position.y, out_msg.pose.position.z = np.dot(
+            self.rot_correction_matrix,
+            np.array([out_msg.pose.position.x, out_msg.pose.position.y, out_msg.pose.position.z]))
+
+        self.pose_pipe.publish(out_msg)
+
+    def publish_latest_points(self, system: MACVO):
+        if self.point_pipe is None: return
+
+        latest_frame = system.gmap.frames[-1]
+        latest_points = system.gmap.get_frame_points(latest_frame)
+        latest_obs = system.gmap.get_frame_observes(latest_frame)
+
+        frame = self.frame
+        time = self.time if self.prev_time is None else self.prev_time
+        assert frame is not None and time is not None
+
+        out_msg = to_pointcloud(
+            position  = latest_points.position,
+            keypoints = latest_obs.pixel_uv,
+            frame_id  = frame,
+            colors    = latest_points.color,
+            time      = time
+        )
+
+        # Correction for the camera coordinate frame
+        for pt in out_msg.points:
+            pt.x, pt.y, pt.z = np.dot(self.rot_correction_matrix, np.array([pt.x, pt.y, pt.z]))
+
+        self.point_pipe.publish(out_msg)
+
+    def publish_latest_stereo(self, system: MACVO):
+        if self.img_pipes is None: return
+
+        source = system.prev_frame
+        if source is None: return
+        frame = self.frame
+        time = self.time if self.prev_time is None else self.prev_time
+        assert frame is not None and time is not None
+
+        img = (source.imageL[0].permute(1, 2, 0).numpy() * 255).copy().astype(np.uint8)
+        msg = self.bridge.cv2_to_imgmsg(img, encoding="bgr8")
+        msg.header.frame_id = frame
+        msg.header.stamp = time
+
+        self.img_pipes.publish(msg)
+
+    def publish_latest_matches(self, system: MACVO):
+        if self.img_pipes is None: return
+
+        source = system.prev_frame
+        if source is None: return
+        frame = self.frame
+        time = self.time if self.prev_time is None else self.prev_time
+        assert frame is not None and time is not None
+        latest_frame = system.gmap.frames[-1]
+
+        # The keypoints are expressed in the resized (inference-size) image frame,
+        # which matches source.imageL, so they can be drawn without rescaling.
+        pixels_uv = system.gmap.get_frame_observes(latest_frame).pixel_uv.int().numpy()
+        img = (source.imageL[0].permute(1, 2, 0).numpy() * 255).copy().astype(np.uint8)
+        if pixels_uv.size > 0:
+            for i in range(pixels_uv.shape[0]):
+                x, y = pixels_uv[i]
+                img = cv2.circle(img, (int(x), int(y)), 2, (0, 255, 0), -1)
+        msg = self.bridge.cv2_to_imgmsg(img, encoding="bgr8")
+        msg.header.frame_id = frame
+        msg.header.stamp = time
+        self.img_pipes.publish(msg)
+
+    def receive_frame(self, msg_L: Image, msg_R: Image) -> None:
+        if self.frame is None or self.bridge is None:
+            self.get_logger().error("MACVO Node not initialized yet, skipping frame")
+            return
+
+        self.prev_frame, self.prev_time = self.frame, self.time
+        self.frame = msg_L.header.frame_id
+        self.time = msg_L.header.stamp
+        imageL = self.bridge.imgmsg_to_cv2(msg_L, desired_encoding="passthrough")
+        imageR = self.bridge.imgmsg_to_cv2(msg_R, desired_encoding="passthrough")
+
+        camera_fx, camera_fy = self.camera_info.k[0], self.camera_info.k[4]
+        camera_cx, camera_cy = self.camera_info.k[2], self.camera_info.k[5]
+        meta = MetaInfo(
+            idx=self.frame_idx,
+            baseline=self.baseline,
+            width=self.camera_info.width,
+            height=self.camera_info.height,
+            K=torch.tensor([[camera_fx, 0., camera_cx],
+                            [0., camera_fy, camera_cy],
+                            [0., 0., 1.]]).float())
+
+        frame = SourceDataFrame(
+            meta=meta,
+            imageL=torch.tensor(imageL)[..., :3].float().permute(2, 0, 1).unsqueeze(0) / 255.,
+            imageR=torch.tensor(imageR)[..., :3].float().permute(2, 0, 1).unsqueeze(0) / 255.,
+            imu=None,
+            gtFlow=None, gtDepth=None, gtPose=None, flowMask=None
+        ).resize_image(scale_u=self.scale_u, scale_v=self.scale_v)
+
+        start_time = self.get_clock().now()
+        self.odometry.run(frame)
+        end_time = self.get_clock().now()
+        # self.get_logger().info(f"Frame {self.frame_idx} processed in {end_time - start_time}")
+        self.frame_idx += 1
+
+
+def main():
+    rclpy.init()
+    node = MACVONode()
+    rclpy.spin(node)
+
+    node.destroy_node()
+    rclpy.shutdown()
+
+
+if __name__ == '__main__':
+    main()
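A note on the `rot_correction_matrix` used above: the alternative matrix mentioned in the comment, `[[1, 0, 0], [0, -1, 0], [0, 0, -1]]`, negates the y and z components of every published point and pose position. Below is a minimal sketch of that flip; which specific camera/world conventions it converts between is an assumption for illustration, not something the node states.

```python
import numpy as np

# The disabled variant of rot_correction_matrix from macvo.py: negate y and z.
# This maps between two right-handed frames that share the x axis, for example
# a camera optical frame (x right, y down, z forward) and a frame with y up.
R = np.array([[1, 0, 0],
              [0, -1, 0],
              [0, 0, -1]])

p_camera = np.array([1.0, 2.0, 3.0])  # hypothetical point in the camera frame
print(R @ p_camera)                   # [ 1. -2. -3.]
```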
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/src b/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/src
new file mode 160000
index 000000000..6c02b6ae9
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/macvo/src
@@ -0,0 +1 @@
+Subproject commit 6c02b6ae9c2817b60c65d52485ce117111dd03ab
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/package.xml b/robot/ros_ws/src/autonomy/2_perception/macvo/package.xml
new file mode 100644
index 000000000..b5019c082
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/package.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>macvo</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="markchenyutian@gmail.com">yutian</maintainer>
+  <license>TODO: License declaration</license>
+
+  <depend>sensor_interfaces</depend>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/resource/macvo b/robot/ros_ws/src/autonomy/2_perception/macvo/resource/macvo
new file mode 100644
index 000000000..e69de29bb
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/setup.cfg b/robot/ros_ws/src/autonomy/2_perception/macvo/setup.cfg
new file mode 100644
index 000000000..9c4fab297
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/macvo
+[install]
+install_scripts=$base/lib/macvo
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/setup.py b/robot/ros_ws/src/autonomy/2_perception/macvo/setup.py
new file mode 100644
index 000000000..5ba2f6905
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/setup.py
@@ -0,0 +1,39 @@
+import os
+from setuptools import setup
+
+package_name = 'macvo'
+
+
+# Collect every file under `directory` so the MAC-VO submodule ships as package data.
+def package_files(directory):
+    paths = []
+    for (path, directories, filenames) in os.walk(directory):
+        for filename in filenames:
+            paths.append(os.path.join('..', path, filename))
+    return paths
+
+
+extra_files = package_files('macvo/src')
+
+setup(
+    name=package_name,
+    version='0.0.0',
+    packages=[package_name],
+    data_files=[
+        ('share/ament_index/resource_index/packages',
+            ['resource/' + package_name]),
+        ('share/' + package_name, ['package.xml']),
+        ('share/' + package_name, ['launch/macvo.launch.xml']),
+        ('share/' + package_name + '/config', ['config/interface_config.yaml', 'config/model_config.yaml']),
+    ],
+    package_data={package_name: extra_files},
+    install_requires=['setuptools'],
+    zip_safe=True,
+    maintainer='yutian',
+    maintainer_email='markchenyutian@gmail.com',
+    description='TODO: Package description',
+    license='TODO: License declaration',
+    tests_require=['pytest'],
+    entry_points={
+        'console_scripts': [
+            'macvo = macvo.macvo:main',
+        ],
+    },
+)
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_copyright.py b/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_copyright.py
new file mode 100644
index 000000000..97a39196e
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+    rc = main(argv=['.', 'test'])
+    assert rc == 0, 'Found errors'
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_flake8.py b/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_flake8.py
new file mode 100644
index 000000000..27ee1078f
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+    rc, errors = main_with_errors(argv=[])
+    assert rc == 0, \
+        'Found %d code style errors / warnings:\n' % len(errors) + \
+        '\n'.join(errors)
diff --git a/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_pep257.py b/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_pep257.py
new file mode 100644
index 000000000..b234a3840
--- /dev/null
+++ b/robot/ros_ws/src/autonomy/2_perception/macvo/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+    rc = main(argv=['.', 'test'])
+    assert rc == 0, 'Found code style errors / warnings'
diff --git a/robot/ros_ws/src/autonomy/2_perception/perception_bringup/launch/perception.launch.xml b/robot/ros_ws/src/autonomy/2_perception/perception_bringup/launch/perception.launch.xml
index 0363a73b3..e53991382 100644
--- a/robot/ros_ws/src/autonomy/2_perception/perception_bringup/launch/perception.launch.xml
+++ b/robot/ros_ws/src/autonomy/2_perception/perception_bringup/launch/perception.launch.xml
@@ -2,6 +2,25 @@
[hunk body unrecoverable: the XML markup was stripped; the hunk adds 19 lines that launch the macvo node conditionally on the USE_MACVO environment variable]
diff --git a/robot/ros_ws/src/robot_bringup/rviz/robot.rviz b/robot/ros_ws/src/robot_bringup/rviz/robot.rviz
index 5479e91b8..6482cd95e 100644
--- a/robot/ros_ws/src/robot_bringup/rviz/robot.rviz
+++ b/robot/ros_ws/src/robot_bringup/rviz/robot.rviz
@@ -5,7 +5,7 @@ Panels:
     Property Tree Widget:
       Expanded:
         - /TF1/Frames1
-        - /Sensors1
+        - /Perception1
         - /Local1
         - /Local1/DROAN1
         - /Local1/DROAN1/Trimmed Global Plan for DROAN1
         - /Local1/Trajectory Controller1
         - /Global1
       Splitter Ratio: 0.590062141418457
-    Tree Height: 1085
+    Tree Height: 701
   - Class: rviz_common/Selection
     Name: Selection
   - Class: rviz_common/Tool Properties
@@ -103,6 +103,8 @@ Visualization Manager:
         world:
           map_FLU:
             base_link_ground_truth:
+              {}
+            map:
               base_link:
                 base_link_frd:
                   {}
@@ -113,7 +115,6 @@ Visualization Manager:
                   {}
                 ouster:
                   {}
-            map:
               base_link_stabilized:
                 {}
               look_ahead_point:
@@ -233,6 +234,44 @@ Visualization Manager:
           Value: false
       Enabled: true
       Name: Sensors
+    - Class: rviz_common/Group
+      Displays:
+        - Class: rviz_default_plugins/Image
+          Enabled: true
+          Max Value: 1
+          Median window: 5
+          Min Value: 0
+          Name: Visual Odometry Features
+          Normalize Range: true
+          Topic:
+            Depth: 5
+            Durability Policy: Volatile
+            History Policy: Keep Last
+            Reliability Policy: Reliable
+            Value: /robot_1/perception/visual_odometry_img
+          Value: true
+        - Alpha: 1
+          Axes Length: 1
+          Axes Radius: 0.10000000149011612
+          Class: rviz_default_plugins/Pose
+          Color: 255; 25; 0
+          Enabled: true
+          Head Length: 0.30000001192092896
+          Head Radius: 0.10000000149011612
+          Name: Visual Odometry
+          Shaft Length: 1
+          Shaft Radius: 0.05000000074505806
+          Shape: Arrow
+          Topic:
+            Depth: 5
+            Durability Policy: Volatile
+            Filter size: 10
+            History Policy: Keep Last
+            Reliability Policy: Reliable
+            Value: /robot_1/perception/visual_odometry_pose
+          Value: true
+      Enabled: true
+      Name: Perception
     - Class: rviz_common/Group
       Displays:
         - Class: rviz_common/Group
@@ -361,17 +400,17 @@ Visualization Manager:
             Value: trajectory_controller/trajectory_vis
           Value: true
         - Class: rviz_default_plugins/MarkerArray
-          Enabled: true
+          Enabled: false
           Name: Traj Debug
           Namespaces:
-            default: true
+            {}
           Topic:
             Depth: 5
            Durability Policy: Volatile
             History Policy: Keep Last
             Reliability Policy: Reliable
             Value: trajectory_controller/trajectory_controller_debug_markers
-          Value: true
+          Value: false
       Enabled: true
       Name: Trajectory Controller
   Enabled: true
@@ -467,25 +506,25 @@ Views:
   Current:
     Class: rviz_default_plugins/Orbit
-    Distance: 18.401203155517578
+    Distance: 17.393077850341797
     Enable Stereo Rendering:
       Stereo Eye Separation: 0.05999999865889549
       Stereo Focal Distance: 1
       Swap Stereo Eyes: false
      Value: false
     Focal Point:
-      X: -1.8554298877716064
-      Y: 5.675510883331299
-      Z: -2.8006136417388916
+      X: -2.118461847305298
+      Y: 3.126241683959961
+      Z: -0.541254997253418
     Focal Shape Fixed Size: false
     Focal Shape Size: 0.05000000074505806
     Invert Z Axis: false
     Name: Current View
     Near Clip Distance: 0.009999999776482582
-    Pitch: 0.4553954601287842
+    Pitch: 0.3603954613208771
     Target Frame: <Fixed Frame>
     Value: Orbit (rviz)
-    Yaw: 0.07311739027500153
+    Yaw: 0.3381178677082062
   Saved: ~
 Window Geometry:
   Displays:
@@ -497,7 +536,7 @@
   Height: 1376
   Hide Left Dock: false
   Hide Right Dock: false
-  QMainWindow State: 000000ff00000000fd0000000400000000000001e5000004c6fc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000004c6000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006500000002eb000000c9000000000000000000000001000001f6000004c6fc0200000007fb00000016004c006500660074002000430061006d006500720061010000003b000001880000000000000000fb00000014004c006500660074002000440065007000740068010000003b0000016a0000000000000000fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000001c00460072006f006e00740020004c0065006600740020005200470042010000003b0000020e0000002800fffffffb0000002000460072006f006e00740020004c006500660074002000440065007000740068010000024f000002b20000002800fffffffb0000000a0056006900650077007300000000fd000001a8000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000009ba0000003efc0100000002fb0000000800540069006d00650100000000000009ba0000025300fffffffb0000000800540069006d00650100000000000004500000000000000000000005d3000004c600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+  QMainWindow State: 000000ff00000000fd0000000400000000000001e5000004c6fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b00000346000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000003000560069007300750061006c0020004f0064006f006d006500740072007900200046006500610074007500720065007301000003870000017a0000002800fffffffb0000000a0049006d00610067006501000003ad0000016f000000000000000000000001000001f6000004c6fc0200000007fb00000016004c006500660074002000430061006d006500720061010000003b000001880000000000000000fb00000014004c006500660074002000440065007000740068010000003b0000016a0000000000000000fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000001c00460072006f006e00740020004c0065006600740020005200470042010000003b0000020e0000002800fffffffb0000002000460072006f006e00740020004c006500660074002000440065007000740068010000024f000002b20000002800fffffffb0000000a0056006900650077007300000000fd000001a8000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000009ba0000003efc0100000002fb0000000800540069006d00650100000000000009ba0000025300fffffffb0000000800540069006d00650100000000000004500000000000000000000005d3000004c600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
   Selection:
     collapsed: false
   Time:
@@ -506,6 +545,8 @@
     collapsed: false
   Views:
     collapsed: false
+  Visual Odometry Features:
+    collapsed: false
   Width: 2490
   X: 1990
   Y: 27
diff --git a/simulation/isaac-sim/docker/Dockerfile.isaac-ros b/simulation/isaac-sim/docker/Dockerfile.isaac-ros
index 1c21accd4..bc0f2aa0e 100644
--- a/simulation/isaac-sim/docker/Dockerfile.isaac-ros
+++ b/simulation/isaac-sim/docker/Dockerfile.isaac-ros
@@ -1,79 +1,111 @@
+ARG ISAAC_VERSION="4.2.0"
 # expects context to be the root of the repository, i.e. AirStack/. this is so we can access AirStack/ros_ws/
-FROM nvcr.io/nvidia/isaac-sim:4.2.0
+FROM nvcr.io/nvidia/isaac-sim:${ISAAC_VERSION}
+ARG ISAAC_VERSION
 WORKDIR /isaac-sim
-# isaac's ros2 launch run_isaacsim.launch.py hardcodes to search in this path, so we have to put the executables here
-RUN mkdir -p /root/.local/share/ov/pkg/
-RUN ln -s /isaac-sim /root/.local/share/ov/pkg/isaac-sim-4.2.0
 # allows us to run isaac-sim as root
 ENV OMNI_KIT_ALLOW_ROOT=1
-# setup timezone
-RUN echo 'Etc/UTC' > /etc/timezone && \
-    ln -s /usr/share/zoneinfo/Etc/UTC /etc/localtime && \
-    apt-get update && \
-    apt-get install -q -y --no-install-recommends tzdata && \
-    rm -rf /var/lib/apt/lists/*
+# from https://github.com/athackst/dockerfiles/blob/main/ros2/humble.Dockerfile
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install language
+RUN apt-get update && apt-get install -y \
+    locales \
+    && locale-gen en_US.UTF-8 \
+    && update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 \
+    && rm -rf /var/lib/apt/lists/*
+ENV LANG=en_US.UTF-8
+
+# Install timezone
+RUN ln -fs /usr/share/zoneinfo/UTC /etc/localtime \
+    && export DEBIAN_FRONTEND=noninteractive \
+    && apt-get update \
+    && apt-get install -y tzdata \
+    && dpkg-reconfigure --frontend noninteractive tzdata \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN apt-get update && apt-get -y upgrade \
+    && rm -rf /var/lib/apt/lists/*
-# install packages
-RUN apt-get update && apt-get install -q -y --no-install-recommends \
-    dirmngr \
+# Install common programs
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    curl \
     gnupg2 \
-    unzip \
-    tmux \
+    lsb-release \
+    sudo \
+    software-properties-common \
+    wget \
     && rm -rf /var/lib/apt/lists/*
-# setup keys
-RUN set -eux; \
-    key='C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654'; \
-    export GNUPGHOME="$(mktemp -d)"; \
-    gpg --batch --keyserver keyserver.ubuntu.com --recv-keys "$key"; \
-    mkdir -p /usr/share/keyrings; \
-    gpg --batch --export "$key" > /usr/share/keyrings/ros2-latest-archive-keyring.gpg; \
-    gpgconf --kill all; \
-    rm -rf "$GNUPGHOME"
-# setup sources.list
-RUN echo "deb [ signed-by=/usr/share/keyrings/ros2-latest-archive-keyring.gpg ] http://packages.ros.org/ros2/ubuntu jammy main" > /etc/apt/sources.list.d/ros2-latest.list
-# setup environment
-ENV LANG=C.UTF-8
-ENV LC_ALL=C.UTF-8
-ENV ROS_DISTRO=humble
-# Install ROS2 packages
-RUN apt update && apt install -y --no-install-recommends curl emacs vim nano tmux gdb xterm \
-    cmake \
-    git \
+# Install ROS2
+RUN sudo add-apt-repository universe \
+    && curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg \
+    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" | sudo tee /etc/apt/sources.list.d/ros2.list > /dev/null \
+    && apt-get update && apt upgrade -y && apt-get install -y --no-install-recommends \
     ros-humble-desktop \
-    ros-dev-tools \
+    python3-argcomplete \
+    && rm -rf /var/lib/apt/lists/*
+
+ENV ROS_DISTRO=humble
+ENV AMENT_PREFIX_PATH=/opt/ros/humble
+ENV COLCON_PREFIX_PATH=/opt/ros/humble
+ENV LD_LIBRARY_PATH=/opt/ros/humble/lib/x86_64-linux-gnu:/opt/ros/humble/lib
+ENV PATH=/opt/ros/humble/bin:$PATH
+ENV PYTHONPATH=/opt/ros/humble/local/lib/python3.10/dist-packages:/opt/ros/humble/lib/python3.10/site-packages
+ENV ROS_PYTHON_VERSION=3
+ENV ROS_VERSION=2
+ENV ROS_AUTOMATIC_DISCOVERY_RANGE=SUBNET
+ENV DEBIAN_FRONTEND=
+
+# ========================
+# Install dev tools
+RUN apt update && apt install -y \
+    vim nano wget curl tree \
+    cmake build-essential \
+    less htop jq \
     python3-pip \
     python3-rosdep \
+    tmux \
+    gdb
+
+# Install any additional ROS2 packages
+RUN apt update -y && apt install -y \
+    ros-dev-tools \
+    ros-humble-mavros \
     ros-humble-tf2* \
-    ros-humble-mavros \
-    ros-humble-ackermann-msgs
+    ros-humble-topic-tools \
+    ros-humble-grid-map \
+    ros-humble-domain-bridge \
+    ros-humble-ackermann-msgs \
+    libcgal-dev \
+    python3-colcon-common-extensions
 RUN /opt/ros/humble/lib/mavros/install_geographiclib_datasets.sh
+
 RUN /isaac-sim/python.sh -m pip install git+https://github.com/dronekit/dronekit-python#egg=dronekit
 RUN pip install PyYAML mavproxy tmuxp scipy
+ARG isaac_dir_name="IsaacSim-ros_workspaces-IsaacSim-${ISAAC_VERSION}"
 # Install Isaac Sim ROS2 workspace so that we can run the isaacsim ROS package
 RUN cd /tmp/ && \
-    curl -L -O https://github.com/isaac-sim/IsaacSim-ros_workspaces/archive/main.zip && \
-    unzip main.zip && \
-    mv IsaacSim-ros_workspaces-main/humble_ws /humble_ws && \
+    curl -L -O https://github.com/isaac-sim/IsaacSim-ros_workspaces/archive/refs/tags/IsaacSim-${ISAAC_VERSION}.zip && \
+    ls -l && \
+    unzip IsaacSim-${ISAAC_VERSION}.zip && \
+    mv ${isaac_dir_name}/humble_ws /humble_ws && \
     cd /humble_ws && \
     . /opt/ros/humble/setup.sh && \
     colcon build --symlink-install && \
-    rm -rf /tmp/IsaacSim-ros_workspaces-main
+    rm -rf /tmp/IsaacSim-${ISAAC_VERSION}.zip /tmp/${isaac_dir_name}
 # copy over the AscentAeroSystemsSITLPackage
 COPY sitl_integration/AscentAeroSystemsSITLPackage /AscentAeroSystemsSITLPackage
-# the user.config.json file specifies to auto load the SITL extensions
-COPY docker/user.config.json /root/.local/share/ov/data/Kit/Isaac-Sim/4.1/user.config.json
+# isaac's ros2 launch run_isaacsim.launch.py hardcodes to search in this path, so we have to put the executables here
+RUN mkdir -p /root/.local/share/ov/pkg/ && ln -s /isaac-sim /root/.local/share/ov/pkg/isaac-sim-${ISAAC_VERSION}
 COPY docker/fastdds.xml /isaac-sim/fastdds.xml
diff --git a/simulation/isaac-sim/docker/docker-compose.yaml b/simulation/isaac-sim/docker/docker-compose.yaml
index 10fee695b..a238dd6e8 100644
--- a/simulation/isaac-sim/docker/docker-compose.yaml
+++ b/simulation/isaac-sim/docker/docker-compose.yaml
@@ -52,6 +52,7 @@ services:
       - ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw
       - ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw
       - ~/docker/isaac-sim/documents:/root/Documents:rw
+      # IMPORTANT: set the version number without the trailing .0
       - ./user.config.json:/root/.local/share/ov/data/Kit/Isaac-Sim/4.2/user.config.json:rw
       - ./ui.py:/isaac-sim/kit/exts/omni.kit.widget.nucleus_connector/omni/kit/widget/nucleus_connector/ui.py:rw
       # developer stuff
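To make the version-number note in the compose file concrete: Isaac Sim names its Kit config directory after the major.minor version only, so a `4.2.0` image reads `user.config.json` from an `.../Isaac-Sim/4.2/` path. A small sketch of that derivation follows; the helper function is illustrative, not part of the repo.

```python
# Hypothetical helper: derive the in-container user.config.json path from the
# full ISAAC_VERSION build argument by dropping the patch component.
def kit_user_config_path(isaac_version: str) -> str:
    major_minor = ".".join(isaac_version.split(".")[:2])  # "4.2.0" -> "4.2"
    return f"/root/.local/share/ov/data/Kit/Isaac-Sim/{major_minor}/user.config.json"


assert kit_user_config_path("4.2.0") == (
    "/root/.local/share/ov/data/Kit/Isaac-Sim/4.2/user.config.json"
)
```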