diff --git a/docs/tutorials/custom_robot_import.md b/docs/tutorials/custom_robot_import.md index c05c25aac..e2685138d 100644 --- a/docs/tutorials/custom_robot_import.md +++ b/docs/tutorials/custom_robot_import.md @@ -494,28 +494,6 @@ Now that we have the USD file for the robot, let's write our own robot class. Fo def wheel_axle_length(self): return 0.330 - @property - def finger_lengths(self): - return {self.default_arm: 0.04} - - @property - def assisted_grasp_start_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="link_gripper_finger_right", position=th.tensor([0.013, 0.0, 0.01])), - GraspingPoint(link_name="link_gripper_finger_right", position=th.tensor([-0.01, 0.0, 0.009])), - ] - } - - @property - def assisted_grasp_end_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="link_gripper_finger_left", position=th.tensor([0.013, 0.0, 0.01])), - GraspingPoint(link_name="link_gripper_finger_left", position=th.tensor([-0.01, 0.0, 0.009])), - ] - } - @property def disabled_collision_pairs(self): return [ diff --git a/docs/tutorials/demo_collection.md b/docs/tutorials/demo_collection.md index bd52e5ba9..3fe5be681 100644 --- a/docs/tutorials/demo_collection.md +++ b/docs/tutorials/demo_collection.md @@ -101,7 +101,7 @@ playback_env = DataPlaybackWrapper.create_from_hdf5( ) # Playback the entire dataset and record observations -playback_env.playback_dataset(record=True) +playback_env.playback_dataset(record_data=True) # Save the recorded playback data playback_env.save_data() diff --git a/omnigibson/action_primitives/starter_semantic_action_primitives.py b/omnigibson/action_primitives/starter_semantic_action_primitives.py index 7cb5a235b..875c9a148 100644 --- a/omnigibson/action_primitives/starter_semantic_action_primitives.py +++ b/omnigibson/action_primitives/starter_semantic_action_primitives.py @@ -102,7 +102,7 @@ m.LOW_PRECISION_DIST_THRESHOLD = 0.1 m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2 -m.JOINT_POS_DIFF_THRESHOLD = 0.01 +m.JOINT_POS_DIFF_THRESHOLD = 0.005 m.LOW_PRECISION_JOINT_POS_DIFF_THRESHOLD = 0.05 m.JOINT_CONTROL_MIN_ACTION = 0.0 m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = math.radians(45) @@ -506,7 +506,10 @@ def _sample_grasp_pose(self, obj): # Identity quaternion for top-down grasping (x-forward, y-right, z-down) approach_dir = T.quat2mat(grasp_quat) @ th.tensor([0.0, 0.0, -1.0]) - pregrasp_offset = self.robot.finger_lengths[self.arm] / 2.0 + m.GRASP_APPROACH_DISTANCE + avg_finger_offset = th.mean( + th.tensor([length for length in self.robot.eef_to_fingertip_lengths[self.arm].values()]) + ) + pregrasp_offset = avg_finger_offset + m.GRASP_APPROACH_DISTANCE pregrasp_pos = grasp_pos - approach_dir * pregrasp_offset @@ -581,7 +584,7 @@ def _grasp(self, obj): # By default, it's NOT the z-axis of the world frame unless `project_pose_to_goal_frame=False` is set in curobo. # For sticky grasping, we also need to ignore the object during motion planning because the fingers are already closed. 
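Reviewer note on the `_sample_grasp_pose` hunk above: the pregrasp offset now averages the per-finger `eef_to_fingertip_lengths` values instead of halving the removed `finger_lengths` property. A minimal, self-contained sketch of the geometry — the helper and all numbers are hypothetical, and `GRASP_APPROACH_DISTANCE`'s real value comes from the module macros:

```python
import torch as th

def quat_to_rotmat(q):
    # (x, y, z, w) quaternion -> 3x3 rotation matrix (standard formula)
    x, y, z, w = q.tolist()
    return th.tensor([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])

grasp_pos = th.tensor([0.5, 0.0, 0.8])        # hypothetical grasp location
grasp_quat = th.tensor([0.0, 0.0, 0.0, 1.0])  # identity -> top-down grasp, z-down approach
approach_dir = quat_to_rotmat(grasp_quat) @ th.tensor([0.0, 0.0, -1.0])

fingertip_lengths = {"left": 0.087, "right": 0.087}  # hypothetical eef-to-fingertip distances
avg_finger_offset = th.mean(th.tensor(list(fingertip_lengths.values())))
GRASP_APPROACH_DISTANCE = 0.3                 # assumed macro value
pregrasp_pos = grasp_pos - approach_dir * (avg_finger_offset + GRASP_APPROACH_DISTANCE)
print(pregrasp_pos)  # tensor([0.5000, 0.0000, 1.1870]): backed off along the approach axis
```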
yield from self._move_hand( - grasp_pose, motion_constraint=[1, 1, 1, 1, 1, 0], stop_on_contact=True, ignore_objects=[obj] + grasp_pose, motion_constraint=[1, 1, 1, 1, 1, 0], stop_on_ag=True, ignore_objects=[obj] ) elif self.robot.grasping_mode == "assisted": indented_print("Assisted grasping: approach") @@ -853,6 +856,7 @@ def _move_hand( self, target_pose, stop_on_contact=False, + stop_on_ag=False, motion_constraint=None, low_precision=False, lock_auxiliary_arm=False, @@ -864,6 +868,7 @@ def _move_hand( Args: target_pose (Tuple[th.tensor, th.tensor]): target pose to reach for the default end-effector in the world frame stop_on_contact (bool): Whether to stop executing motion plan if contact is detected + stop_on_ag (bool): Whether to stop executing motion plan if assisted grasping is activated motion_constraint (MotionConstraint): Motion constraint for the motion low_precision (bool): Whether to use low precision for the motion lock_auxiliary_arm (bool): Whether to lock the other arm in place @@ -906,7 +911,9 @@ def _move_hand( ) indented_print(f"Plan has {len(q_traj)} steps") - yield from self._execute_motion_plan(q_traj, stop_on_contact=stop_on_contact, low_precision=low_precision) + yield from self._execute_motion_plan( + q_traj, stop_on_contact=stop_on_contact, stop_on_ag=stop_on_ag, low_precision=low_precision + ) def _plan_joint_motion( self, @@ -972,7 +979,13 @@ def _plan_joint_motion( return q_traj def _execute_motion_plan( - self, q_traj, stop_on_contact=False, ignore_failure=False, low_precision=False, ignore_physics=False + self, + q_traj, + stop_on_contact=False, + stop_on_ag=False, + ignore_failure=False, + low_precision=False, + ignore_physics=False, ): for i, joint_pos in enumerate(q_traj): # indented_print(f"Executing motion plan step {i + 1}/{len(q_traj)}") @@ -992,6 +1005,12 @@ def _execute_motion_plan( articulation_control_idx = th.cat(articulation_control_idx) for j in range(m.MAX_STEPS_FOR_JOINT_MOTION): # indented_print(f"Step {j + 1}/{m.MAX_STEPS_FOR_JOINT_MOTION}") + + # We need to call @q_to_action for every step because as the robot base moves, the same base joint_pos will be + # converted to different actions, since HolonomicBaseJointController accepts an action in the robot local frame. + action = self.robot.q_to_action(joint_pos) + yield self._postprocess_action(action) + current_joint_pos = self.robot.get_joint_positions() joint_pos_diff = joint_pos - current_joint_pos base_joint_diff = joint_pos_diff[self.robot.base_control_idx] @@ -1014,14 +1033,10 @@ def _execute_motion_plan( collision_detected = detect_robot_collision_in_sim(self.robot) if stop_on_contact and collision_detected: - indented_print(f"Collision detected at step {i + 1}/{len(q_traj)}") return - # We need to call @q_to_action for every step because as the robot base moves, the same base joint_pos will be - # converted to different actions, since HolonomicBaseJointController accepts an action in the robot local frame. 
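Reviewer note: the `_execute_motion_plan` hunk here moves the `q_to_action()` call to the top of the inner loop. Per the retained comment, `HolonomicBaseJointController` consumes base commands in the robot's local frame, so a fixed world-frame base target maps to a different action on every step as the base moves. A toy 2-D illustration of that effect (names here are illustrative, not OmniGibson APIs):

```python
import torch as th

def world_to_local(delta_xy, base_yaw):
    # Express a world-frame base displacement in the robot's current base frame
    c, s = th.cos(base_yaw), th.sin(base_yaw)
    rot = th.stack([th.stack([c, s]), th.stack([-s, c])])
    return rot @ delta_xy

target_xy = th.tensor([1.0, 0.0])  # fixed world-frame base joint target
for yaw in (0.0, th.pi / 4, th.pi / 2):
    print(yaw, world_to_local(target_xy, th.tensor(yaw)))
# The same world-frame target yields a different local-frame command at each heading,
# hence q_to_action() must be re-evaluated on every control step.
```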
- action = self.robot.q_to_action(joint_pos) - - yield self._postprocess_action(action) + if stop_on_ag and self._get_obj_in_hand() is not None: + return if not ignore_failure: if not base_target_reached: @@ -1334,13 +1349,13 @@ def _move_fingers_to_limit(self, limit_type): target_joint_positions = self._get_joint_position_with_fingers_at_limit(limit_type) action = self.robot.q_to_action(target_joint_positions) for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE): - current_joint_positinos = self.robot.get_joint_positions() - if th.allclose(current_joint_positinos, target_joint_positions, atol=0.005): + yield self._postprocess_action(action) + current_joint_positions = self.robot.get_joint_positions() + if th.allclose(current_joint_positions, target_joint_positions, atol=m.JOINT_POS_DIFF_THRESHOLD): break elif limit_type == "lower" and self._get_obj_in_hand() is not None: # If we are grasping an object, we should stop when object is detected in hand break - yield self._postprocess_action(action) def _execute_grasp(self): """ diff --git a/omnigibson/configs/fetch_behavior.yaml b/omnigibson/configs/fetch_behavior.yaml index cfde1f7d0..ce0e831f1 100644 --- a/omnigibson/configs/fetch_behavior.yaml +++ b/omnigibson/configs/fetch_behavior.yaml @@ -40,6 +40,8 @@ scene: robots: - type: Fetch obs_modalities: [scan, rgb, depth] + include_sensor_names: null + exclude_sensor_names: null scale: 1.0 self_collision: false action_normalize: true diff --git a/omnigibson/configs/r1_primitives.yaml b/omnigibson/configs/r1_primitives.yaml index aed776401..147c94b52 100644 --- a/omnigibson/configs/r1_primitives.yaml +++ b/omnigibson/configs/r1_primitives.yaml @@ -32,6 +32,8 @@ scene: robots: - type: R1 obs_modalities: [rgb] + include_sensor_names: null + exclude_sensor_names: null scale: 1.0 self_collisions: true action_normalize: false diff --git a/omnigibson/configs/robots/fetch.yaml b/omnigibson/configs/robots/fetch.yaml index bb1a53b1d..30af214a4 100644 --- a/omnigibson/configs/robots/fetch.yaml +++ b/omnigibson/configs/robots/fetch.yaml @@ -17,6 +17,8 @@ robot: self_collision: true grasping_mode: physical default_arm_pose: vertical + include_sensor_names: null + exclude_sensor_names: null sensor_config: VisionSensor: sensor_kwargs: diff --git a/omnigibson/configs/robots/freight.yaml b/omnigibson/configs/robots/freight.yaml index 7431b464d..e4d5d6c7d 100644 --- a/omnigibson/configs/robots/freight.yaml +++ b/omnigibson/configs/robots/freight.yaml @@ -10,6 +10,8 @@ robot: base_name: null scale: 1.0 self_collision: false + include_sensor_names: null + exclude_sensor_names: null sensor_config: VisionSensor: sensor_kwargs: diff --git a/omnigibson/configs/robots/husky.yaml b/omnigibson/configs/robots/husky.yaml index d35680b5b..0de969c48 100644 --- a/omnigibson/configs/robots/husky.yaml +++ b/omnigibson/configs/robots/husky.yaml @@ -10,6 +10,8 @@ robot: base_name: null scale: 1.0 self_collision: false + include_sensor_names: null + exclude_sensor_names: null sensor_config: VisionSensor: sensor_kwargs: diff --git a/omnigibson/configs/robots/locobot.yaml b/omnigibson/configs/robots/locobot.yaml index c36e144f3..6a067926f 100644 --- a/omnigibson/configs/robots/locobot.yaml +++ b/omnigibson/configs/robots/locobot.yaml @@ -10,6 +10,8 @@ robot: base_name: null scale: 1.0 self_collision: false + include_sensor_names: null + exclude_sensor_names: null sensor_config: VisionSensor: sensor_kwargs: diff --git a/omnigibson/configs/robots/turtlebot.yaml b/omnigibson/configs/robots/turtlebot.yaml index 0431a430f..ec3b3d2ab 100644 
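Reviewer note on the new `include_sensor_names` / `exclude_sensor_names` keys threaded through these configs: per the docstrings later in this diff, they are substring filters over raw sensor prim paths. A sketch of the matching rule as described — this is a paraphrase, not the actual implementation:

```python
def keep_sensor(prim_path, include_names=None, exclude_names=None):
    # Keep a sensor iff it contains at least one include substring (when given)
    # and contains no exclude substring (when given)
    if include_names is not None and not any(s in prim_path for s in include_names):
        return False
    if exclude_names is not None and any(s in prim_path for s in exclude_names):
        return False
    return True

paths = [
    "/World/robot0/eyes/Camera",   # hypothetical sensor prim paths
    "/World/robot0/wrist/Camera",
    "/World/robot0/base/Lidar",
]
print([p for p in paths if keep_sensor(p, include_names=["Camera"], exclude_names=["wrist"])])
# ['/World/robot0/eyes/Camera']
```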
--- a/omnigibson/configs/robots/turtlebot.yaml +++ b/omnigibson/configs/robots/turtlebot.yaml @@ -10,6 +10,8 @@ robot: base_name: null scale: 1.0 self_collision: false + include_sensor_names: null + exclude_sensor_names: null sensor_config: VisionSensor: sensor_kwargs: diff --git a/omnigibson/configs/tiago_primitives.yaml b/omnigibson/configs/tiago_primitives.yaml index 94d050ecb..714ee6d72 100644 --- a/omnigibson/configs/tiago_primitives.yaml +++ b/omnigibson/configs/tiago_primitives.yaml @@ -33,6 +33,8 @@ scene: robots: - type: Tiago obs_modalities: [rgb] + include_sensor_names: null + exclude_sensor_names: null scale: 1.0 self_collisions: true action_normalize: false diff --git a/omnigibson/configs/turtlebot_nav.yaml b/omnigibson/configs/turtlebot_nav.yaml index 844208e90..957ce9c5f 100644 --- a/omnigibson/configs/turtlebot_nav.yaml +++ b/omnigibson/configs/turtlebot_nav.yaml @@ -30,6 +30,8 @@ scene: robots: - type: Turtlebot obs_modalities: [scan, rgb, depth] + include_sensor_names: null + exclude_sensor_names: null scale: 1.0 self_collision: false action_normalize: true diff --git a/omnigibson/controllers/controller_base.py b/omnigibson/controllers/controller_base.py index a701c2312..4d9acf93d 100644 --- a/omnigibson/controllers/controller_base.py +++ b/omnigibson/controllers/controller_base.py @@ -4,9 +4,17 @@ import torch as th +from omnigibson.macros import create_module_macros from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.python_utils import Recreatable, Registerable, Serializable, assert_valid_key, classproperty +# Create settings for this module +m = create_module_macros(module_path=__file__) + +# Set default isaac kp / kd for controllers +m.DEFAULT_ISAAC_KP = 1e7 +m.DEFAULT_ISAAC_KD = 1e5 + # Global dicts that will contain mappings REGISTERED_CONTROLLERS = dict() REGISTERED_LOCOMOTION_CONTROLLERS = dict() @@ -76,6 +84,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits="default", + isaac_kp=None, + isaac_kd=None, ): """ Args: @@ -99,6 +109,12 @@ def __init__( then all inputted command values will be scaled from the input range to the output range. If either is None, no scaling will be used. If "default", then this range will automatically be set to the @control_limits entry corresponding to self.control_type + isaac_kp (None or float or Array[float]): If specified, stiffness gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers. + Should only be nonzero if self.control_type is position + isaac_kd (None or float or Array[float]): If specified, damping gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers + Should only be nonzero if self.control_type is position or velocity """ # Store arguments self._control_freq = control_freq @@ -154,6 +170,33 @@ def __init__( ) ) + # Set gains + if self.control_type == ControlType.POSITION: + # Set default kp / kd values if not specified + isaac_kp = m.DEFAULT_ISAAC_KP if isaac_kp is None else isaac_kp + isaac_kd = m.DEFAULT_ISAAC_KD if isaac_kd is None else isaac_kd + elif self.control_type == ControlType.VELOCITY: + # No kp should be specified, but kd should be + assert ( + isaac_kp is None + ), f"Control type for controller {self.__class__.__name__} is VELOCITY, so no isaac_kp should be set!" 
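Reviewer note: the branch above (continued below) encodes a defaulting/validation table for the new `isaac_kp` / `isaac_kd` gains. A compact paraphrase of that logic using the module-macro defaults; note it mirrors the diff's use of `DEFAULT_ISAAC_KP` as the velocity damping default, presumably because damping acts as the drive gain under velocity control:

```python
# Paraphrase of the gain defaulting/validation in ControllerBase.__init__
DEFAULT_ISAAC_KP, DEFAULT_ISAAC_KD = 1e7, 1e5

def resolve_gains(control_type, isaac_kp=None, isaac_kd=None):
    if control_type == "position":
        kp = DEFAULT_ISAAC_KP if isaac_kp is None else isaac_kp
        kd = DEFAULT_ISAAC_KD if isaac_kd is None else isaac_kd
        return kp, kd
    if control_type == "velocity":
        # No stiffness allowed; damping doubles as the drive gain, hence the KP-sized default
        assert isaac_kp is None, "velocity control takes no isaac_kp"
        return None, (DEFAULT_ISAAC_KP if isaac_kd is None else isaac_kd)
    if control_type == "effort":
        assert isaac_kp is None and isaac_kd is None, "effort control takes no gains"
        return None, None
    raise ValueError(f"Unknown control type: {control_type}")

print(resolve_gains("position"))  # (10000000.0, 100000.0)
print(resolve_gains("velocity"))  # (None, 10000000.0)
```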
+ isaac_kd = m.DEFAULT_ISAAC_KP if isaac_kd is None else isaac_kd + elif self.control_type == ControlType.EFFORT: + # Neither kp nor kd should be specified + assert ( + isaac_kp is None + ), f"Control type for controller {self.__class__.__name__} is EFFORT, so no isaac_kp should be set!" + assert ( + isaac_kd is None + ), f"Control type for controller {self.__class__.__name__} is EFFORT, so no isaac_kd should be set!" + else: + raise ValueError( + f"Expected control type to be one of: [POSITION, VELOCITY, EFFORT], but got: {self.control_type}" + ) + + self._isaac_kp = None if isaac_kp is None else self.nums2array(isaac_kp, self.control_dim) + self._isaac_kd = None if isaac_kd is None else self.nums2array(isaac_kd, self.control_dim) + def _generate_default_command_input_limits(self): """ Generates default command input limits based on the control limits @@ -510,6 +553,24 @@ def control_type(self): """ raise NotImplementedError + @property + def isaac_kp(self): + """ + Returns: + None or Array[float]: Stiffness gains that should be applied to the underlying Isaac joint motors. + None if not specified. + """ + return self._isaac_kp + + @property + def isaac_kd(self): + """ + Returns: + None or Array[float]: Damping gains that should be applied to the underlying Isaac joint motors. + None if not specified. + """ + return self._isaac_kd + @property def command_input_limits(self): """ diff --git a/omnigibson/controllers/dd_controller.py b/omnigibson/controllers/dd_controller.py index 25f4d6b3b..e348df5fa 100644 --- a/omnigibson/controllers/dd_controller.py +++ b/omnigibson/controllers/dd_controller.py @@ -21,6 +21,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits="default", + isaac_kp=None, + isaac_kd=None, ): """ Args: @@ -47,6 +49,12 @@ def __init__( If either is None, no scaling will be used. If "default", then this range will automatically be set to the maximum linear and angular velocities calculated from @wheel_radius, @wheel_axle_length, and @control_limits velocity limits entry + isaac_kp (None or float or Array[float]): If specified, stiffness gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers. + Should only be nonzero if self.control_type is position + isaac_kd (None or float or Array[float]): If specified, damping gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers + Should only be nonzero if self.control_type is position or velocity """ # Store internal variables self._wheel_radius = wheel_radius @@ -76,6 +84,8 @@ def __init__( dof_idx=dof_idx, command_input_limits=command_input_limits, command_output_limits=command_output_limits, + isaac_kp=isaac_kp, + isaac_kd=isaac_kd, ) def _update_goal(self, command, control_dict): diff --git a/omnigibson/controllers/holonomic_base_joint_controller.py b/omnigibson/controllers/holonomic_base_joint_controller.py index 6ab998ac8..1d26fbcbc 100644 --- a/omnigibson/controllers/holonomic_base_joint_controller.py +++ b/omnigibson/controllers/holonomic_base_joint_controller.py @@ -25,6 +25,8 @@ def __init__( dof_idx, command_input_limits=None, command_output_limits=None, + isaac_kp=None, + isaac_kd=None, pos_kp=None, pos_damping_ratio=None, vel_kp=None, @@ -55,6 +57,12 @@ def __init__( then all inputted command values will be scaled from the input range to the output range. If either is None, no scaling will be used.
If "default", then this range will automatically be set to the @control_limits entry corresponding to self.control_type + isaac_kp (None or float or Array[float]): If specified, stiffness gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers. + Should only be nonzero if self.control_type is position + isaac_kd (None or float or Array[float]): If specified, damping gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers + Should only be nonzero if self.control_type is position or velocity pos_kp (None or float): If @motor_type is "position" and @use_impedances=True, this is the proportional gain applied to the joint controller. If None, a default value will be used. pos_damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the @@ -78,6 +86,8 @@ def __init__( dof_idx=dof_idx, command_input_limits=command_input_limits, command_output_limits=command_output_limits, + isaac_kp=isaac_kp, + isaac_kd=isaac_kd, pos_kp=pos_kp, pos_damping_ratio=pos_damping_ratio, vel_kp=vel_kp, diff --git a/omnigibson/controllers/ik_controller.py b/omnigibson/controllers/ik_controller.py index 5c1fc1323..addb58bf9 100644 --- a/omnigibson/controllers/ik_controller.py +++ b/omnigibson/controllers/ik_controller.py @@ -50,10 +50,12 @@ def __init__( (-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5), ), + isaac_kp=None, + isaac_kd=None, pos_kp=None, pos_damping_ratio=None, vel_kp=None, - use_impedances=True, + use_impedances=False, mode="pose_delta_ori", smoothing_filter_size=None, workspace_pose_limiter=None, @@ -86,6 +88,12 @@ def __init__( then all inputted command values will be scaled from the input range to the output range. If either is None, no scaling will be used. If "default", then this range will automatically be set to the @control_limits entry corresponding to self.control_type + isaac_kp (None or float or Array[float]): If specified, stiffness gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers. + Should only be nonzero if self.control_type is position + isaac_kd (None or float or Array[float]): If specified, damping gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers + Should only be nonzero if self.control_type is position or velocity pos_kp (None or float): If @motor_type is "position" and @use_impedances=True, this is the proportional gain applied to the joint controller. If None, a default value will be used. 
pos_damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the @@ -183,6 +191,8 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D use_impedances=use_impedances, command_input_limits=command_input_limits, command_output_limits=command_output_limits, + isaac_kp=isaac_kp, + isaac_kd=isaac_kd, ) def reset(self): diff --git a/omnigibson/controllers/joint_controller.py b/omnigibson/controllers/joint_controller.py index bf2a2545e..d418af4ef 100644 --- a/omnigibson/controllers/joint_controller.py +++ b/omnigibson/controllers/joint_controller.py @@ -13,7 +13,7 @@ ) from omnigibson.macros import create_module_macros from omnigibson.utils.backend_utils import _compute_backend as cb -from omnigibson.utils.backend_utils import _ComputeBackend, _ComputeNumpyBackend, _ComputeTorchBackend +from omnigibson.utils.backend_utils import add_compute_function from omnigibson.utils.python_utils import assert_valid_key from omnigibson.utils.ui_utils import create_module_logger @@ -46,6 +46,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits="default", + isaac_kp=None, + isaac_kd=None, pos_kp=None, pos_damping_ratio=None, vel_kp=None, @@ -78,6 +80,12 @@ def __init__( then all inputted command values will be scaled from the input range to the output range. If either is None, no scaling will be used. If "default", then this range will automatically be set to the @control_limits entry corresponding to self.control_type + isaac_kp (None or float or Array[float]): If specified, stiffness gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers. + Should only be nonzero if self.control_type is position + isaac_kd (None or float or Array[float]): If specified, damping gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers + Should only be nonzero if self.control_type is position or velocity pos_kp (None or float): If @motor_type is "position" and @use_impedances=True, this is the proportional gain applied to the joint controller. If None, a default value will be used. 
pos_damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the @@ -141,6 +149,8 @@ def __init__( dof_idx=dof_idx, command_input_limits=command_input_limits, command_output_limits=command_output_limits, + isaac_kp=isaac_kp, + isaac_kd=isaac_kd, ) def _generate_default_command_output_limits(self): @@ -223,7 +233,7 @@ def compute_control(self, goal_dict, control_dict): else: # effort u = target - u = cb.compute_joint_torques(u, control_dict["mass_matrix"], self.dof_idx) + u = cb.get_custom_method("compute_joint_torques")(u, control_dict["mass_matrix"], self.dof_idx) # Add gravity compensation if self._use_gravity_compensation: @@ -341,6 +351,6 @@ def _compute_joint_torques_numpy( # Set these as part of the backend values -setattr(_ComputeBackend, "compute_joint_torques", None) -setattr(_ComputeTorchBackend, "compute_joint_torques", _compute_joint_torques_torch) -setattr(_ComputeNumpyBackend, "compute_joint_torques", _compute_joint_torques_numpy) +add_compute_function( + name="compute_joint_torques", np_function=_compute_joint_torques_numpy, th_function=_compute_joint_torques_torch +) diff --git a/omnigibson/controllers/multi_finger_gripper_controller.py b/omnigibson/controllers/multi_finger_gripper_controller.py index 854609275..be3111c78 100644 --- a/omnigibson/controllers/multi_finger_gripper_controller.py +++ b/omnigibson/controllers/multi_finger_gripper_controller.py @@ -40,6 +40,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits="default", + isaac_kp=None, + isaac_kd=None, inverted=False, mode="binary", open_qpos=None, @@ -69,6 +71,12 @@ def __init__( then all inputted command values will be scaled from the input range to the output range. If either is None, no scaling will be used. If "default", then this range will automatically be set to the @control_limits entry corresponding to self.control_type + isaac_kp (None or float or Array[float]): If specified, stiffness gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers. + Should only be nonzero if self.control_type is position + isaac_kd (None or float or Array[float]): If specified, damping gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers + Should only be nonzero if self.control_type is position or velocity inverted (bool): whether or not the command direction (grasp is negative) and the control direction are inverted, e.g. to grasp you need to move the joint in the positive direction. mode (str): mode for this controller. 
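Reviewer note: `compute_control` now resolves the torque helper through the backend registry (`add_compute_function` / `get_custom_method`) instead of `setattr`-patching the backend classes. A minimal sketch of that registry pattern with a dict-backed stand-in — the real registry lives in `omnigibson.utils.backend_utils`, and the torque math below is a placeholder, not the real formula:

```python
import numpy as np
import torch as th

# Dict-backed stand-in for the backend registry; the real one dispatches on the
# active compute backend (numpy vs. torch).
_CUSTOM_METHODS = {"np": {}, "th": {}}
_ACTIVE = "th"

def add_compute_function(name, np_function, th_function):
    _CUSTOM_METHODS["np"][name] = np_function
    _CUSTOM_METHODS["th"][name] = th_function

def get_custom_method(name):
    return _CUSTOM_METHODS[_ACTIVE][name]

add_compute_function(
    name="compute_joint_torques",
    np_function=lambda u, mm, idx: mm[np.ix_(idx, idx)] @ u,  # placeholder math
    th_function=lambda u, mm, idx: mm[idx][:, idx] @ u,       # placeholder math
)
print(get_custom_method("compute_joint_torques")(th.ones(2), th.eye(3), [0, 1]))  # tensor([1., 1.])
```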
Valid options are: @@ -116,6 +124,8 @@ def __init__( dof_idx=dof_idx, command_input_limits=command_input_limits, command_output_limits=command_output_limits, + isaac_kp=isaac_kp, + isaac_kd=isaac_kd, ) def _generate_default_command_output_limits(self): @@ -126,8 +136,15 @@ def _generate_default_command_output_limits(self): if self._mode == "binary": command_output_limits = (-1.0, 1.0) # If we're in smoothing mode, output limits should be the average of the independent limits - elif self._mode == "smoothing": - command_output_limits = cb.mean(command_output_limits[0]), cb.mean(command_output_limits[1]) + elif self._mode == "smooth": + command_output_limits = ( + cb.mean(command_output_limits[0]), + cb.mean(command_output_limits[1]), + ) + elif self._mode == "independent": + pass + else: + raise ValueError(f"Invalid mode {self._mode}") return command_output_limits diff --git a/omnigibson/controllers/null_joint_controller.py b/omnigibson/controllers/null_joint_controller.py index 07aa0cb6e..188497ee4 100644 --- a/omnigibson/controllers/null_joint_controller.py +++ b/omnigibson/controllers/null_joint_controller.py @@ -17,6 +17,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits="default", + isaac_kp=None, + isaac_kd=None, default_goal=None, pos_kp=None, pos_damping_ratio=None, @@ -45,6 +47,12 @@ def __init__( then all inputted command values will be scaled from the input range to the output range. If either is None, no scaling will be used. If "default", then this range will automatically be set to the @control_limits entry corresponding to self.control_type + isaac_kp (None or float or Array[float]): If specified, stiffness gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers. + Should only be nonzero if self.control_type is position + isaac_kd (None or float or Array[float]): If specified, damping gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers + Should only be nonzero if self.control_type is position or velocity default_goal (None or n-array): if specified, should be the same length as @dof_idx, specifying the default joint goal for this controller to converge to pos_kp (None or float): If @motor_type is "position" and @use_impedances=True, this is the @@ -67,6 +75,8 @@ def __init__( dof_idx=dof_idx, command_input_limits=command_input_limits, command_output_limits=command_output_limits, + isaac_kp=isaac_kp, + isaac_kd=isaac_kd, pos_kp=pos_kp, pos_damping_ratio=pos_damping_ratio, vel_kp=vel_kp, diff --git a/omnigibson/controllers/osc_controller.py b/omnigibson/controllers/osc_controller.py index bcbeafa24..2b89f0c7c 100644 --- a/omnigibson/controllers/osc_controller.py +++ b/omnigibson/controllers/osc_controller.py @@ -64,6 +64,8 @@ def __init__( dof_idx, command_input_limits="default", command_output_limits=((-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5)), + isaac_kp=None, + isaac_kd=None, kp=150.0, kp_limits=(10.0, 300.0), damping_ratio=1.0, @@ -103,6 +105,12 @@ def __init__( then all inputted command values will be scaled from the input range to the output range. If either is None, no scaling will be used. If "default", then this range will automatically be set to the @control_limits entry corresponding to self.control_type + isaac_kp (None or float or Array[float]): If specified, stiffness gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers. 
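Reviewer note: besides correcting the mode string to "smooth" (the value the controller actually accepts), the `_generate_default_command_output_limits` hunk above now handles all three modes explicitly and raises on anything else. A paraphrase of the resulting limit selection, with hypothetical finger joint limits:

```python
import torch as th

def default_gripper_output_limits(mode, per_dof_limits):
    # Mirrors the branching added in _generate_default_command_output_limits
    low, high = per_dof_limits
    if mode == "binary":
        return -1.0, 1.0
    if mode == "smooth":
        return th.mean(low), th.mean(high)  # one scalar limit shared across fingers
    if mode == "independent":
        return low, high  # keep per-DOF limits untouched
    raise ValueError(f"Invalid mode {mode}")

limits = (th.tensor([0.0, 0.01]), th.tensor([0.04, 0.05]))  # hypothetical finger joint limits
print(default_gripper_output_limits("smooth", limits))  # (tensor(0.0050), tensor(0.0450))
```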
+ Should only be nonzero if self.control_type is position + isaac_kd (None or float or Array[float]): If specified, damping gains to apply to the underlying + isaac DOFs. Can either be a single number or a per-DOF set of numbers + Should only be nonzero if self.control_type is position or velocity kp (None, int, float, or array): Gain values to apply to 6DOF error. If None, will be variable (part of action space) kp_limits (2-array): (min, max) values of kp @@ -251,6 +259,8 @@ def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: D dof_idx=dof_idx, command_input_limits=command_input_limits, command_output_limits=command_output_limits, + isaac_kp=isaac_kp, + isaac_kd=isaac_kd, ) def reset(self): diff --git a/omnigibson/envs/data_wrapper.py b/omnigibson/envs/data_wrapper.py index 0202e7877..01c4006dc 100644 --- a/omnigibson/envs/data_wrapper.py +++ b/omnigibson/envs/data_wrapper.py @@ -10,12 +10,12 @@ import omnigibson as og import omnigibson.lazy as lazy -from omnigibson.envs.env_wrapper import EnvironmentWrapper +from omnigibson.envs.env_wrapper import EnvironmentWrapper, create_wrapper from omnigibson.macros import gm from omnigibson.objects.object_base import BaseObject from omnigibson.sensors.vision_sensor import VisionSensor from omnigibson.utils.config_utils import TorchEncoder -from omnigibson.utils.python_utils import create_object_from_init_info, h5py_group_to_torch +from omnigibson.utils.python_utils import create_object_from_init_info, h5py_group_to_torch, assert_valid_key from omnigibson.utils.ui_utils import create_module_logger # Create module logger @@ -38,8 +38,8 @@ def __init__(self, env, output_path, only_successes=True): """ # Make sure the wrapped environment inherits correct omnigibson format assert isinstance( - env, og.Environment - ), "Expected wrapped @env to be a subclass of OmniGibson's Environment class!" + env, (og.Environment, EnvironmentWrapper) + ), "Expected wrapped @env to be a subclass of OmniGibson's Environment class or EnvironmentWrapper!" # Only one scene is supported for now assert len(og.sim.scenes) == 1, "Only one scene is currently supported for DataWrapper env!" 
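Reviewer note: with the relaxed `isinstance` check above, a `DataWrapper` can now sit on top of another `EnvironmentWrapper` rather than only on a bare `og.Environment`. Combined with the `create_wrapper(env, wrapper_cfg=...)` extension later in this diff, wrappers can be stacked explicitly. A hedged usage sketch — the config is minimal and the wrapper type is a placeholder:

```python
import omnigibson as og
from omnigibson.envs.env_wrapper import create_wrapper

cfg = {"scene": {"type": "Scene"}, "robots": []}  # minimal config, just for illustration
env = og.Environment(configs=cfg)

# New in this diff: pass an explicit wrapper_cfg; "type" must name a registered wrapper
# class ("SomeWrapper" is a placeholder -- substitute one registered in your install).
env = create_wrapper(env=env, wrapper_cfg={"type": "SomeWrapper"})
# Omitting wrapper_cfg preserves the legacy behavior of reading env.wrapper_config.
```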
@@ -64,12 +64,13 @@ def __init__(self, env, output_path, only_successes=True): # Run super super().__init__(env=env) - def step(self, action): + def step(self, action, n_render_iterations=1): """ Run the environment step() function and collect data Args: action (th.Tensor): action to take in environment + n_render_iterations (int): Number of rendering iterations to use before returning observations Returns: 5-tuple: @@ -84,7 +85,7 @@ def step(self, action): if isinstance(action, dict): action = th.cat([act for act in action.values()]) - next_obs, reward, terminated, truncated, info = self.env.step(action) + next_obs, reward, terminated, truncated, info = self.env.step(action, n_render_iterations=n_render_iterations) self.step_count += 1 self._record_step_trajectory(action, next_obs, reward, terminated, truncated, info) @@ -374,6 +375,10 @@ def reset(self): # Call super first init_obs, init_info = super().reset() + # Make sure all objects are awake to begin to guarantee we save their initial states + for obj in self.scene.objects: + obj.wake() + # Store this initial state as part of the trajectory state = og.sim.dump_state(serialized=True) step_data = { @@ -465,11 +470,15 @@ def create_from_hdf5( cls, input_path, output_path, - robot_obs_modalities, + robot_obs_modalities=tuple(), robot_sensor_config=None, external_sensors_config=None, + include_sensor_names=None, + exclude_sensor_names=None, n_render_iterations=5, only_successes=False, + include_env_wrapper=False, + additional_wrapper_configs=None, ): """ Create a DataPlaybackWrapper environment instance form the recorded demonstration info @@ -489,12 +498,21 @@ def create_from_hdf5( dictionary specifying an individual external sensor's relevant parameters. See the example external_sensors key in fetch_behavior.yaml env config. This can be used to specify additional sensors to collect observations during playback. + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors during playback + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors during playback n_render_iterations (int): Number of rendering iterations to use when loading each stored frame from the recorded data. This is needed because the omniverse real-time raytracing always lags behind the underlying physical state by a few frames, and additionally produces transient visual artifacts when the physical state changes. Increasing this number will improve the rendered quality at the expense of speed. 
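Reviewer note: the expanded `create_from_hdf5` signature makes sensor filtering and wrapper re-application part of playback setup. A hedged end-to-end sketch based on the signature above and the updated tutorial — paths and filter substrings are hypothetical, and the import path is assumed:

```python
from omnigibson.envs import DataPlaybackWrapper  # import path assumed

playback_env = DataPlaybackWrapper.create_from_hdf5(
    input_path="demo.hdf5",
    output_path="demo_playback.hdf5",
    robot_obs_modalities=["rgb", "proprio"],
    include_sensor_names=["Camera"],  # keep only sensors whose prim path contains "Camera"...
    exclude_sensor_names=["wrist"],   # ...but drop any on the wrist (hypothetical substrings)
    n_render_iterations=5,
    only_successes=False,
    include_env_wrapper=True,         # re-apply the wrapper stored in the recorded env config
)
playback_env.playback_dataset(record_data=True)
playback_env.save_data()
```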
only_successes (bool): Whether to only save successful episodes + include_env_wrapper (bool): Whether to include environment wrapper stored in the underlying env config + additional_wrapper_configs (None or list of dict): If specified, list of wrapper config(s) specifying + environment wrappers to wrap the internal environment class in Returns: DataPlaybackWrapper: Generated playback environment @@ -524,7 +542,10 @@ def create_from_hdf5( # Set observation modalities and update sensor config for robot_cfg in config["robots"]: - robot_cfg["obs_modalities"] = robot_obs_modalities + robot_cfg["obs_modalities"] = list(robot_obs_modalities) + robot_cfg["include_sensor_names"] = include_sensor_names + robot_cfg["exclude_sensor_names"] = exclude_sensor_names + if robot_sensor_config is not None: robot_cfg["sensor_config"] = robot_sensor_config if external_sensors_config is not None: @@ -533,6 +554,14 @@ def create_from_hdf5( # Load env env = og.Environment(configs=config) + # Optionally include the desired environment wrapper specified in the config + if include_env_wrapper: + env = create_wrapper(env=env) + + if additional_wrapper_configs is not None: + for wrapper_cfg in additional_wrapper_configs: + env = create_wrapper(env=env, wrapper_cfg=wrapper_cfg) + # Wrap and return env return cls( env=env, @@ -565,31 +594,39 @@ def __init__(self, env, input_path, output_path, n_render_iterations=5, only_suc # Run super super().__init__(env=env, output_path=output_path, only_successes=only_successes) + def _process_obs(self, obs, info): + """ + Modifies @obs inplace for any relevant post-processing + + Args: + obs (dict): Keyword-mapped relevant observations from the immediate env step + info (dict): Keyword-mapped relevant information from the immediate env step + """ + # Default is a no-op + return obs + def _parse_step_data(self, action, obs, reward, terminated, truncated, info): # Store action, obs, reward, terminated, truncated, info step_data = dict() - step_data["obs"] = obs + step_data["obs"] = self._process_obs(obs=obs, info=info) step_data["action"] = action step_data["reward"] = reward step_data["terminated"] = terminated step_data["truncated"] = truncated return step_data - def playback_episode(self, episode_id, record=True, video_path=None, video_writer=None): + def playback_episode(self, episode_id, record_data=True, video_writer=None, video_rgb_key=None): """ Playback episode @episode_id, and optionally record observation data if @record is True Args: episode_id (int): Episode to playback. This should be a valid demo ID number from the inputted collected data hdf5 file - record (bool): Whether to record data during playback or not - video_path (None or str): If specified, path to write the playback video to - video_writer (None or str): If specified, an imageio video writer to use for writing the video (can be specified in place of @video_path) + record_data (bool): Whether to record data during playback or not + video_writer (None or imageio.Writer): If specified, writer object that RGB frames will be written to + video_rgb_key (None or str): If specified, observation key representing the RGB frames to write to video. + If @video_writer is specified, this must also be specified! """ - using_external_writer = video_writer is not None - if video_writer is None and video_path is not None: - video_writer = imageio.get_writer(video_path, fps=30) - data_grp = self.input_hdf5["data"] assert f"demo_{episode_id}" in data_grp, f"No valid episode with ID {episode_id} found!" 
traj_grp = data_grp[f"demo_{episode_id}"] @@ -612,9 +649,9 @@ def playback_episode(self, episode_id, record=True, video_path=None, video_write og.sim.load_state(state[0, : int(state_size[0])], serialized=True) # If record, record initial observations - if record: - init_obs, _, _, _, _ = self.env.step(action=action[0], n_render_iterations=self.n_render_iterations) - step_data = {"obs": init_obs} + if record_data: + init_obs, _, _, _, init_info = self.env.step(action=action[0], n_render_iterations=self.n_render_iterations) + step_data = {"obs": self._process_obs(obs=init_obs, info=init_info)} self.current_traj_history.append(step_data) for i, (a, s, ss, r, te, tr) in enumerate( @@ -644,7 +681,7 @@ def playback_episode(self, episode_id, record=True, video_path=None, video_write self.current_obs, _, _, _, info = self.env.step(action=a, n_render_iterations=self.n_render_iterations) # If recording, record data - if record: + if record_data: step_data = self._parse_step_data( action=a, obs=self.current_obs, @@ -655,33 +692,44 @@ def playback_episode(self, episode_id, record=True, video_path=None, video_write ) self.current_traj_history.append(step_data) - # If writing video, save the current frame + # If writing to video, write desired frame if video_writer is not None: - video_writer.append_data(og.sim.viewer_camera.get_obs()[0]["rgb"][:, :, :3].numpy()) + assert_valid_key(video_rgb_key, self.current_obs.keys(), "video_rgb_key") + video_writer.append_data(self.current_obs[video_rgb_key][:, :, :3].numpy()) self.step_count += 1 - if record: + if record_data: self.flush_current_traj() - # If we weren't using an external writer but we're still writing a video, close the writer - if video_writer is not None and not using_external_writer: - video_writer.close() - - def playback_dataset(self, record=True, video_path=None, video_writer=None): + def playback_dataset(self, record_data=False, video_writer=None, video_rgb_key=None): """ Playback all episodes from the input HDF5 file, and optionally record observation data if @record is True Args: - record (bool): Whether to record data during playback or not - video_path (None or str): If specified, path to write the playback video to - video_writer (None or str): If specified, an imageio video writer to use for writing the video (can be specified in place of @video_path) + record_data (bool): Whether to record data during playback or not + video_writer (None or imageio.Writer): If specified, writer object that RGB frames will be written to + video_rgb_key (None or str): If specified, observation key representing the RGB frames to write to video. + If @video_writer is specified, this must also be specified! """ - if video_writer is None and video_path is not None: - video_writer = imageio.get_writer(video_path, fps=30) for episode_id in range(self.input_hdf5["data"].attrs["n_episodes"]): - self.playback_episode(episode_id=episode_id, record=record, video_path=None, video_writer=video_writer) + self.playback_episode( + episode_id=episode_id, + record_data=record_data, + video_writer=video_writer, + video_rgb_key=video_rgb_key, + ) + + def create_video_writer(self, fpath, fps=30): + """ + Creates a video writer to write video frames to when playing back the dataset - # Close the video writer at the end if created - if video_writer is not None: - video_writer.close() + Args: + fpath (str): Absolute path that the generated video writer will write to. 
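Reviewer note: video writing is now caller-managed — `create_video_writer` hands back an imageio writer, playback takes an explicit `video_rgb_key` naming which observation supplies the frames, and (per the auto-close logic removed above) the caller is responsible for closing the writer. A hedged usage sketch; the observation key is hypothetical:

```python
# Continues from a playback_env built via DataPlaybackWrapper.create_from_hdf5(...)
writer = playback_env.create_video_writer(fpath="/abs/path/demo_playback.mp4", fps=30)
playback_env.playback_dataset(
    record_data=False,
    video_writer=writer,
    video_rgb_key="robot0::robot0:eyes:Camera:0::rgb",  # hypothetical obs key; inspect obs.keys()
)
writer.close()  # caller-owned: the wrapper no longer closes writers it did not create
```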
Should end in .mp4 + fps (int): Desired frames per second when generating video + + Returns: + imageio.Writer: Generated video writer + """ + assert fpath.endswith(".mp4"), f"Video writer fpath must end with .mp4! Got: {fpath}" + return imageio.get_writer(fpath, fps=fps) diff --git a/omnigibson/envs/env_wrapper.py b/omnigibson/envs/env_wrapper.py index 09f69d80e..ce8a9aa8e 100644 --- a/omnigibson/envs/env_wrapper.py +++ b/omnigibson/envs/env_wrapper.py @@ -10,11 +10,16 @@ log = create_module_logger(module_name=__name__) -def create_wrapper(env): +def create_wrapper(env, wrapper_cfg=None): """ - Wraps environment @env with wrapper defined by env.wrapper_config + Wraps environment @env with wrapper defined by @wrapper_cfg + + Args: + env (og.Environment): environment to wrap + wrapper_cfg (None or dict): If specified, configuration to wrap environment with. + If not specified, will default to env.wrapper_config """ - wrapper_cfg = deepcopy(env.wrapper_config) + wrapper_cfg = deepcopy(env.wrapper_config if wrapper_cfg is None else wrapper_cfg) wrapper_type = wrapper_cfg.pop("type") wrapper_cfg["env"] = env @@ -41,12 +46,13 @@ def __init__(self, env): # Run super super().__init__(obj=env) - def step(self, action): + def step(self, action, n_render_iterations=1): """ By default, run the normal environment step() function Args: action (th.tensor): action to take in environment + n_render_iterations (int): Number of rendering iterations to use before returning observations Returns: 4-tuple: - (dict) observations from the environment - (float) reward from the environment - (bool) whether the current episode is terminated - (bool) whether the current episode is truncated - (dict) misc information """ - return self.env.step(action) + return self.env.step(action, n_render_iterations=n_render_iterations) def reset(self): """ diff --git a/omnigibson/examples/robots/import_custom_robot.py b/omnigibson/examples/robots/import_custom_robot.py index 99fcc32be..4837bae65 100644 --- a/omnigibson/examples/robots/import_custom_robot.py +++ b/omnigibson/examples/robots/import_custom_robot.py @@ -86,7 +86,10 @@ parent_link: left_arm_link6 offset: position: [0, 0, 0.06] - orientation: [0, 0, 0, 1] + orientation: [0, 0, 0, 1] # NOTE: Convention for these eef vis links should be tuned such that: + # z-axis points out from the tips of the fingers + # y-axis points in the direction from the left finger to the right finger + # x-axis is automatically inferred from the above two axes - link: right_eef_link # same format as @camera_links parent_link: right_arm_link6 offset: @@ -722,7 +725,9 @@ def create_curobo_cfgs(robot_prim, robot_urdf_path, curobo_cfg, root_link, save_ is_holonomic (bool): Whether the robot has a holonomic base applied or not """ robot_name = robot_prim.GetName() - ee_links = list(curobo_cfg.eef_to_gripper_info.keys()) + + # Left, then right by default if sorted alphabetically + ee_links = list(sorted(curobo_cfg.eef_to_gripper_info.keys())) # Find all joints that have a negative axis specified so we know to flip them in curobo tree = ET.parse(robot_urdf_path) diff --git a/omnigibson/objects/controllable_object.py b/omnigibson/objects/controllable_object.py index 335ca84c7..599cba909 100644 --- a/omnigibson/objects/controllable_object.py +++ b/omnigibson/objects/controllable_object.py @@ -2,6 +2,7 @@ from abc import abstractmethod from copy import deepcopy from functools import cached_property +from typing import Literal import gymnasium as gym import networkx as nx @@ -294,20 +295,16 @@ def update_controller_mode(self): """ # Update the control modes of each joint based on the outputted control
from the controllers unused_dofs = {i for i in range(self.n_dof)} - for name in self._controllers: - for dof in self._controllers[name].dof_idx: + for controller in self._controllers.values(): + for i, dof in enumerate(controller.dof_idx): # Make sure the DOF has not already been set yet, and remove it afterwards assert dof.item() in unused_dofs unused_dofs.remove(dof.item()) - control_type = self._controllers[name].control_type + control_type = controller.control_type self._joints[self.dof_names_ordered[dof]].set_control_type( control_type=control_type, - kp=self.default_kp if control_type == ControlType.POSITION else None, - kd=( - self.default_kd - if control_type == ControlType.POSITION or control_type == ControlType.VELOCITY - else None - ), + kp=None if controller.isaac_kp is None else controller.isaac_kp[i], + kd=None if controller.isaac_kd is None else controller.isaac_kd[i], ) # For all remaining DOFs not controlled, we assume these are free DOFs (e.g.: virtual joints representing free @@ -613,16 +610,29 @@ def get_control_dict(self): fcns["_root_pos_quat"] = lambda: ControllableObjectViewAPI.get_position_orientation(self.articulation_root_path) fcns["root_pos"] = lambda: fcns["_root_pos_quat"][0] fcns["root_quat"] = lambda: fcns["_root_pos_quat"][1] - fcns["root_lin_vel"] = lambda: ControllableObjectViewAPI.get_linear_velocity(self.articulation_root_path) - fcns["root_ang_vel"] = lambda: ControllableObjectViewAPI.get_angular_velocity(self.articulation_root_path) + + # NOTE: We explicitly compute hand-calculated (i.e.: non-Isaac native) values for velocity because + # Isaac has some numerical inconsistencies for low velocity values, which cause downstream issues for + # controllers when computing accurate control. This is why we explicitly set the `estimate=True` flag here, + # which is not used anywhere else in the codebase + fcns["root_lin_vel"] = lambda: ControllableObjectViewAPI.get_linear_velocity( + self.articulation_root_path, estimate=True + ) + fcns["root_ang_vel"] = lambda: ControllableObjectViewAPI.get_angular_velocity( + self.articulation_root_path, estimate=True + ) fcns["root_rel_lin_vel"] = lambda: ControllableObjectViewAPI.get_relative_linear_velocity( - self.articulation_root_path + self.articulation_root_path, + estimate=True, ) fcns["root_rel_ang_vel"] = lambda: ControllableObjectViewAPI.get_relative_angular_velocity( - self.articulation_root_path + self.articulation_root_path, + estimate=True, ) fcns["joint_position"] = lambda: ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path) - fcns["joint_velocity"] = lambda: ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path) + fcns["joint_velocity"] = lambda: ControllableObjectViewAPI.get_joint_velocities( + self.articulation_root_path, estimate=True + ) fcns["joint_effort"] = lambda: ControllableObjectViewAPI.get_joint_efforts(self.articulation_root_path) fcns["mass_matrix"] = lambda: ControllableObjectViewAPI.get_mass_matrix(self.articulation_root_path) fcns["gravity_force"] = lambda: ControllableObjectViewAPI.get_generalized_gravity_forces( @@ -651,11 +661,20 @@ def _add_task_frame_control_dict(self, fcns, task_name, link_name): ) fcns[f"{task_name}_pos_relative"] = lambda: fcns[f"_{task_name}_pos_quat_relative"][0] fcns[f"{task_name}_quat_relative"] = lambda: fcns[f"_{task_name}_pos_quat_relative"][1] + + # NOTE: We explicitly compute hand-calculated (i.e.: non-Isaac native) values for velocity because + # Isaac has some numerical inconsistencies for low velocity values, 
which cause downstream issues for + # controllers when computing accurate control. This is why we explicitly set the `estimate=True` flag here, + # which is not used anywhere else in the codebase fcns[f"{task_name}_lin_vel_relative"] = lambda: ControllableObjectViewAPI.get_link_relative_linear_velocity( - self.articulation_root_path, link_name + self.articulation_root_path, + link_name, + estimate=True, ) fcns[f"{task_name}_ang_vel_relative"] = lambda: ControllableObjectViewAPI.get_link_relative_angular_velocity( - self.articulation_root_path, link_name + self.articulation_root_path, + link_name, + estimate=True, ) # -n_joints because there may be an additional 6 entries at the beginning of the array, if this robot does # not have a fixed base (i.e.: the 6DOF --> "floating" joint) @@ -693,12 +712,21 @@ def dump_action(self): """ return self._last_action + def set_position_orientation(self, position=None, orientation=None, frame: Literal["world", "scene"] = "world"): + # Run super first + super().set_position_orientation(position, orientation, frame) + + # Clear the controllable view's backend since state has changed + ControllableObjectViewAPI.clear_object(prim_path=self.articulation_root_path) + def set_joint_positions(self, positions, indices=None, normalized=False, drive=False): # Call super first super().set_joint_positions(positions=positions, indices=indices, normalized=normalized, drive=drive) # If we're not driving the joints, reset the controllers so that the goals are updated wrt to the new state + # Also clear the controllable view's backend since state has changed if not drive: + ControllableObjectViewAPI.clear_object(prim_path=self.articulation_root_path) for controller in self._controllers.values(): controller.reset() @@ -891,24 +919,6 @@ def control_limits(self): "has_limit": self.joint_has_limits, } - @property - def default_kp(self): - """ - Returns: - float: Default kp gain to apply to any DOF when switching control modes (e.g.: switching from a - velocity control mode to a position control mode) - """ - return 1e7 - - @property - def default_kd(self): - """ - Returns: - float: Default kd gain to apply to any DOF when switching control modes (e.g.: switching from a - position control mode to a velocity control mode) - """ - return 1e5 - @property def reset_joint_pos(self): """ diff --git a/omnigibson/prims/xform_prim.py b/omnigibson/prims/xform_prim.py index 27f71f8f0..6fea9561c 100644 --- a/omnigibson/prims/xform_prim.py +++ b/omnigibson/prims/xform_prim.py @@ -344,7 +344,7 @@ def get_local_pose(self): logger.warning( 'get_local_pose is deprecated and will be removed in a future release. Use get_position_orientation(frame="parent") instead' ) - return self.get_position_orientation(self.prim_path, frame="parent") + return self.get_position_orientation(frame="parent") def set_local_pose(self, position=None, orientation=None): """ @@ -359,7 +359,7 @@ def set_local_pose(self, position=None, orientation=None): logger.warning( 'set_local_pose is deprecated and will be removed in a future release. 
Use set_position_orientation(position=position, orientation=orientation, frame="parent") instead' ) - return self.set_position_orientation(self.prim_path, position, orientation, frame="parent") + return self.set_position_orientation(position, orientation, frame="parent") def get_world_scale(self): """ diff --git a/omnigibson/robots/a1.py b/omnigibson/robots/a1.py index b4cb60f14..478d1d211 100644 --- a/omnigibson/robots/a1.py +++ b/omnigibson/robots/a1.py @@ -33,6 +33,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -70,6 +72,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict @@ -128,6 +136,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, grasping_mode=grasping_mode, @@ -164,10 +174,6 @@ def _default_controllers(self): def _default_joint_pos(self): return self._default_robot_model_joint_pos - @property - def finger_lengths(self): - return {self.default_arm: 0.087} - @cached_property def arm_link_names(self): return {self.default_arm: [f"arm_seg{i+1}" for i in range(5)]} @@ -205,12 +211,12 @@ def teleop_rotation_offset(self): return {self.default_arm: self._teleop_rotation_offset} @property - def assisted_grasp_start_points(self): + def _assisted_grasp_start_points(self): return {self.default_arm: self._ag_start_points} @property - def assisted_grasp_end_points(self): - return {self.default_arm: self._ag_start_points} + def _assisted_grasp_end_points(self): + return {self.default_arm: self._ag_end_points} @property def disabled_collision_pairs(self): diff --git a/omnigibson/robots/active_camera_robot.py b/omnigibson/robots/active_camera_robot.py index ef2a21948..3802533b0 100644 --- a/omnigibson/robots/active_camera_robot.py +++ b/omnigibson/robots/active_camera_robot.py @@ -22,17 +22,6 @@ class ActiveCameraRobot(BaseRobot): """ - def _validate_configuration(self): - # Make sure a camera controller is specified - assert ( - "camera" in self._controllers - ), "Controller 'camera' must exist in controllers! 
Current controllers: {}".format( - list(self._controllers.keys()) - ) - - # run super - super()._validate_configuration() - def _get_proprioception_dict(self): dic = super()._get_proprioception_dict() diff --git a/omnigibson/robots/behavior_robot.py b/omnigibson/robots/behavior_robot.py index 442358653..6dfac40cc 100644 --- a/omnigibson/robots/behavior_robot.py +++ b/omnigibson/robots/behavior_robot.py @@ -79,6 +79,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities="rgb", + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", # Unique to ManipulationRobot grasping_mode="assisted", @@ -108,6 +110,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, grasping_mode=grasping_mode, grasping_direction="upper", @@ -400,7 +404,7 @@ def set_position_orientation( ) @property - def assisted_grasp_start_points(self): + def _assisted_grasp_start_points(self): side_coefficients = {"left": th.tensor([1, -1, 1]), "right": th.tensor([1, 1, 1])} return { arm: [ @@ -415,7 +419,7 @@ def assisted_grasp_start_points(self): } @property - def assisted_grasp_end_points(self): + def _assisted_grasp_end_points(self): side_coefficients = {"left": th.tensor([1, -1, 1]), "right": th.tensor([1, 1, 1])} return { arm: [ diff --git a/omnigibson/robots/fetch.py b/omnigibson/robots/fetch.py index 8b4702cb7..8677bb65f 100644 --- a/omnigibson/robots/fetch.py +++ b/omnigibson/robots/fetch.py @@ -5,7 +5,6 @@ from omnigibson.robots.active_camera_robot import ActiveCameraRobot from omnigibson.robots.articulated_trunk_robot import ArticulatedTrunkRobot -from omnigibson.robots.manipulation_robot import GraspingPoint from omnigibson.robots.two_wheel_robot import TwoWheelRobot from omnigibson.robots.untucked_arm_pose_robot import UntuckedArmPoseRobot from omnigibson.utils.transform_utils import euler2quat @@ -41,6 +40,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -83,6 +84,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. 
See self._get_proprioception_dict @@ -121,6 +128,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, grasping_mode=grasping_mode, @@ -204,28 +213,6 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.372 - @property - def finger_lengths(self): - return {self.default_arm: 0.1} - - @property - def assisted_grasp_start_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="r_gripper_finger_link", position=th.tensor([0.025, -0.02, 0.0])), - GraspingPoint(link_name="r_gripper_finger_link", position=th.tensor([-0.025, -0.02, 0.0])), - ] - } - - @property - def assisted_grasp_end_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="l_gripper_finger_link", position=th.tensor([0.025, 0.02, 0.0])), - GraspingPoint(link_name="l_gripper_finger_link", position=th.tensor([-0.025, 0.02, 0.0])), - ] - } - @cached_property def base_joint_names(self): return ["l_wheel_joint", "r_wheel_joint"] diff --git a/omnigibson/robots/franka.py b/omnigibson/robots/franka.py index ccb64e358..b7c8a00ca 100644 --- a/omnigibson/robots/franka.py +++ b/omnigibson/robots/franka.py @@ -33,6 +33,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -71,6 +73,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. 
See self._get_proprioception_dict @@ -90,10 +98,10 @@ def __init__( if end_effector == "gripper": self._model_name = "franka_panda" self._gripper_control_idx = th.arange(7, 9) - self._eef_link_names = "panda_hand" + self._eef_link_names = "eef_link" self._finger_link_names = ["panda_leftfinger", "panda_rightfinger"] self._finger_joint_names = ["panda_finger_joint1", "panda_finger_joint2"] - self._default_robot_model_joint_pos = th.tensor([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75, 0.00, 0.00]) + self._default_robot_model_joint_pos = th.tensor([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75, 0.04, 0.04]) self._teleop_rotation_offset = th.tensor([-1, 0, 0, 0]) self._ag_start_points = [ GraspingPoint(link_name="panda_rightfinger", position=th.tensor([0.0, 0.001, 0.045])), @@ -191,6 +199,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, grasping_mode=grasping_mode, @@ -227,10 +237,6 @@ def _default_controllers(self): def _default_joint_pos(self): return self._default_robot_model_joint_pos - @property - def finger_lengths(self): - return {self.default_arm: 0.1} - @cached_property def arm_link_names(self): return {self.default_arm: [f"panda_link{i}" for i in range(8)]} @@ -253,11 +259,19 @@ def finger_joint_names(self): @property def usd_path(self): - return os.path.join(gm.ASSET_PATH, f"models/franka/{self.model_name}.usd") + return ( + os.path.join(gm.ASSET_PATH, "models/franka/franka_panda/usd/franka_panda.usda") + if self.model_name == "franka_panda" + else os.path.join(gm.ASSET_PATH, f"models/franka/{self.model_name}.usd") + ) @property def urdf_path(self): - return os.path.join(gm.ASSET_PATH, f"models/franka/{self.model_name}.urdf") + return ( + os.path.join(gm.ASSET_PATH, "models/franka/franka_panda/urdf/franka_panda.urdf") + if self.model_name == "franka_panda" + else os.path.join(gm.ASSET_PATH, f"models/franka/{self.model_name}.urdf") + ) @property def curobo_path(self): @@ -265,22 +279,27 @@ def curobo_path(self): assert ( self._model_name == "franka_panda" ), f"Only franka_panda is currently supported for curobo. Got: {self._model_name}" - return os.path.join(gm.ASSET_PATH, f"models/franka/{self.model_name}_description_curobo.yaml") + return os.path.join( + gm.ASSET_PATH, "models/franka/franka_panda/curobo/franka_panda_description_curobo_default.yaml" + ) @cached_property def curobo_attached_object_link_names(self): - return {self._eef_link_names: "attached_object"} + assert ( + self._model_name == "franka_panda" + ), f"Only franka_panda is currently supported for curobo. 
Got: {self._model_name}" + return super().curobo_attached_object_link_names @property def teleop_rotation_offset(self): return {self.default_arm: self._teleop_rotation_offset} @property - def assisted_grasp_start_points(self): + def _assisted_grasp_start_points(self): return {self.default_arm: self._ag_start_points} @property - def assisted_grasp_end_points(self): + def _assisted_grasp_end_points(self): return {self.default_arm: self._ag_start_points} @property diff --git a/omnigibson/robots/franka_mounted.py b/omnigibson/robots/franka_mounted.py index bf531aa1f..e38dd09c7 100644 --- a/omnigibson/robots/franka_mounted.py +++ b/omnigibson/robots/franka_mounted.py @@ -1,10 +1,6 @@ import os - -import torch as th - from omnigibson.macros import gm from omnigibson.robots.franka import FrankaPanda -from omnigibson.robots.manipulation_robot import GraspingPoint class FrankaMounted(FrankaPanda): @@ -23,34 +19,16 @@ def _default_controllers(self): controllers[f"gripper_{self.default_arm}"] = "MultiFingerGripperController" return controllers - @property - def finger_lengths(self): - return {self.default_arm: 0.15} - @property def usd_path(self): - return os.path.join(gm.ASSET_PATH, "models/franka/franka_mounted.usd") + return os.path.join(gm.ASSET_PATH, "models/franka/franka_mounted/usd/franka_mounted.usda") @property def urdf_path(self): - return os.path.join(gm.ASSET_PATH, "models/franka/franka_mounted.urdf") + return os.path.join(gm.ASSET_PATH, "models/franka/franka_mounted/urdf/franka_mounted.urdf") @property def curobo_path(self): - return os.path.join(gm.ASSET_PATH, "models/franka/franka_mounted_description_curobo.yaml") - - @property - def assisted_grasp_start_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="panda_rightfinger", position=th.tensor([0.0, 0.001, 0.045])), - ] - } - - @property - def assisted_grasp_end_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="panda_leftfinger", position=th.tensor([0.0, 0.001, 0.045])), - ] - } + return os.path.join( + gm.ASSET_PATH, "models/franka/franka_mounted/curobo/franka_mounted_description_curobo_default.yaml" + ) diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py index 3cff80ef8..14214708c 100644 --- a/omnigibson/robots/holonomic_base_robot.py +++ b/omnigibson/robots/holonomic_base_robot.py @@ -57,6 +57,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, **kwargs, @@ -91,6 +93,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. 
If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict @@ -125,6 +133,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, **kwargs, diff --git a/omnigibson/robots/locomotion_robot.py b/omnigibson/robots/locomotion_robot.py index 136189bac..d1f361dfa 100644 --- a/omnigibson/robots/locomotion_robot.py +++ b/omnigibson/robots/locomotion_robot.py @@ -25,13 +25,11 @@ class LocomotionRobot(BaseRobot): """ def _validate_configuration(self): - # We make sure that our base controller exists and is a locomotion controller - assert ( - "base" in self._controllers - ), "Controller 'base' must exist in controllers! Current controllers: {}".format(list(self._controllers.keys())) - assert isinstance( - self._controllers["base"], LocomotionController - ), "Base controller must be a LocomotionController!" + # If we have a base controller, make sure it is a locomotion controller + if "base" in self._controllers: + assert isinstance( + self._controllers["base"], LocomotionController + ), "Base controller must be a LocomotionController!" # run super super()._validate_configuration() diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py index e338f3bff..efe0d17e3 100644 --- a/omnigibson/robots/manipulation_robot.py +++ b/omnigibson/robots/manipulation_robot.py @@ -22,13 +22,24 @@ ) from omnigibson.macros import create_module_macros, gm from omnigibson.object_states import ContactBodies +from omnigibson.prims.geom_prim import VisualGeomPrim from omnigibson.robots.robot_base import BaseRobot from omnigibson.utils.backend_utils import _compute_backend as cb from omnigibson.utils.constants import JointType, PrimType from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function from omnigibson.utils.python_utils import assert_valid_key, classproperty from omnigibson.utils.sampling_utils import raytest_batch -from omnigibson.utils.usd_utils import ControllableObjectViewAPI, GripperRigidContactAPI, create_joint +from omnigibson.utils.usd_utils import ( + ControllableObjectViewAPI, + GripperRigidContactAPI, + create_joint, + create_primitive_mesh, + absolute_prim_path_to_scene_relative, +) +from omnigibson.utils.ui_utils import create_module_logger + +# Create module logger +log = create_module_logger(module_name=__name__) # Create settings for this module m = create_module_macros(module_path=__file__) @@ -39,6 +50,9 @@ m.ARTICULATED_ASSIST_FRACTION = 0.7 m.MIN_ASSIST_FORCE = 0 m.MAX_ASSIST_FORCE = 100 +m.MIN_AG_DEFAULT_GRASP_POINT_PROP = 0.2 +m.MAX_AG_DEFAULT_GRASP_POINT_PROP = 0.8 + m.CONSTRAINT_VIOLATION_THRESHOLD = 0.1 m.RELEASE_WINDOW = 1 / 30.0 # release window in seconds @@ -85,6 +99,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -123,6 +139,12 @@ def __init__( Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! 
+ include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict @@ -149,6 +171,9 @@ def __init__( self._grasping_direction = grasping_direction self._disable_grasp_handling = disable_grasp_handling + # Other variables filled in at runtime + self._eef_to_fingertip_lengths = None # dict mapping arm name to finger name to offset + # Initialize other variables used for assistive grasping self._ag_freeze_joint_pos = { arm: {} for arm in self.arm_names @@ -178,6 +203,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, **kwargs, @@ -186,25 +213,17 @@ def __init__( def _validate_configuration(self): # Iterate over all arms for arm in self.arm_names: - # We make sure that our arm controller exists and is a manipulation controller - assert ( - "arm_{}".format(arm) in self._controllers - ), "Controller 'arm_{}' must exist in controllers! Current controllers: {}".format( - arm, list(self._controllers.keys()) - ) - assert isinstance( - self._controllers["arm_{}".format(arm)], ManipulationController - ), "Arm {} controller must be a ManipulationController!".format(arm) - - # We make sure that our gripper controller exists and is a gripper controller - assert ( - "gripper_{}".format(arm) in self._controllers - ), "Controller 'gripper_{}' must exist in controllers! 
Current controllers: {}".format( - arm, list(self._controllers.keys()) - ) - assert isinstance( - self._controllers["gripper_{}".format(arm)], GripperController - ), "Gripper {} controller must be a GripperController!".format(arm) + # If we have an arm controller, make sure it is a manipulation controller + if f"arm_{arm}" in self._controllers: + assert isinstance( + self._controllers["arm_{}".format(arm)], ManipulationController + ), "Arm {} controller must be a ManipulationController!".format(arm) + + # If we have a gripper controller, make sure it is a gripper controller + if f"gripper_{arm}" in self._controllers: + assert isinstance( + self._controllers["gripper_{}".format(arm)], GripperController + ), "Gripper {} controller must be a GripperController!".format(arm) # run super super()._validate_configuration() @@ -212,6 +231,14 @@ def _validate_configuration(self): def _initialize(self): super()._initialize() + # Infer relevant link properties, e.g.: fingertip location, AG grasping points + # We use a try / except to maintain backwards compatibility with robots that do not follow our + # OG-specified convention + try: + self._infer_finger_properties() + except AssertionError as e: + log.warning(f"Could not infer relevant finger link properties because:\n\n{e}") + if gm.AG_CLOTH: for arm in self.arm_names: self._ag_check_in_volume[arm], self._ag_calculate_volume[arm] = ( generate_points_in_volume_checker_function( ) ) @@ -220,6 +247,163 @@ + def _infer_finger_properties(self): + """ + Infers relevant finger properties based on the given finger meshes of the robot + + NOTE: This assumes the given EEF convention for parallel jaw grippers -- i.e.: + z points in the direction of the fingers, y points in the direction of grasp articulation, and x + is then inferred automatically + """ + # Calculate and cache fingertip to eef frame offsets, as well as AG grasping points + self._eef_to_fingertip_lengths = dict() + self._default_ag_start_points = dict() + self._default_ag_end_points = dict() + for arm, finger_links in self.finger_links.items(): + self._eef_to_fingertip_lengths[arm] = dict() + eef_link = self.eef_links[arm] + world_to_eef_tf = T.pose2mat(eef_link.get_position_orientation()) + eef_to_world_tf = T.pose_inv(world_to_eef_tf) + + # Infer parent link for this finger + finger_parent_link, finger_parent_max_z = None, None + is_parallel_jaw = len(finger_links) == 2 + assert ( + is_parallel_jaw + ), "Inferring finger link information can only be done for parallel jaw gripper robots!" + for i, finger_link in enumerate(finger_links): + # Find parent, and make sure one exists + parent_prim_path, parent_link = None, None + for joint in self.joints.values(): + if finger_link.prim_path == joint.body1: + parent_prim_path = joint.body0 + break + assert ( + parent_prim_path is not None + ), f"Expected articulated parent joint for finger link {finger_link.name} but found none!" + for link in self.links.values(): + if parent_prim_path == link.prim_path: + parent_link = link + break + assert parent_link is not None, f"Expected parent link located at {parent_prim_path} but found none!" + # Make sure all fingers share the same parent + if finger_parent_link is None: + finger_parent_link = parent_link + finger_parent_pts = finger_parent_link.collision_boundary_points_world + assert ( + finger_parent_pts is not None + ), f"Expected finger parent points to be defined for parent link {finger_parent_link.name}, but got None!"
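A side note on the frame conversions that follow (an editor's hedged sketch, not part of the diff): points are augmented into homogeneous (N, 4) form and right-multiplied by the transpose of the 4x4 transform, which in this row-vector layout is equivalent to applying the transform to each point as a column vector. A hypothetical standalone helper capturing the pattern, where `eef_to_world_tf` is the inverse of the EEF pose as in the code below:

```python
import torch as th

def points_world_to_eef(pts_world, eef_to_world_tf):
    # (N, 3) -> (N, 4) homogeneous form
    pts_h = th.concatenate([pts_world, th.ones(len(pts_world), 1)], dim=-1)
    # Row-vector convention: p @ T.T == (T @ p.T).T; drop the homogeneous 1 afterwards
    return (pts_h @ eef_to_world_tf.T)[:, :3]
```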
+ # Convert from world frame -> eef frame + finger_parent_pts = th.concatenate([finger_parent_pts, th.ones(len(finger_parent_pts), 1)], dim=-1) + finger_parent_pts = (finger_parent_pts @ eef_to_world_tf.T)[:, :3] + finger_parent_max_z = finger_parent_pts[:, 2].max().item() + else: + assert ( + finger_parent_link == parent_link + ), f"Expected all fingers to have same parent link, but found multiple parents at {finger_parent_link.prim_path} and {parent_link.prim_path}" + + # Calculate this finger's collision boundary points in the world frame + finger_pts = finger_link.collision_boundary_points_world + assert ( + finger_pts is not None + ), f"Expected finger points to be defined for link {finger_link.name}, but got None!" + # Convert from world frame -> eef frame + finger_pts = th.concatenate([finger_pts, th.ones(len(finger_pts), 1)], dim=-1) + finger_pts = (finger_pts @ eef_to_world_tf.T)[:, :3] + + # Since we know the EEF frame always points with z outwards towards the fingers, the outer-most point / + # fingertip is the maximum z value + finger_max_z = finger_pts[:, 2].max().item() + assert ( + finger_max_z > 0 + ), f"Expected positive fingertip to eef frame offset for link {finger_link.name}, but got: {finger_max_z}. Does the EEF frame z-axis point in the direction of the fingers?" + self._eef_to_fingertip_lengths[arm][finger_link.name] = finger_max_z + + # Now, only keep points that lie above the parent max z by at least 20% of the finger range, for inferring y values + finger_range = finger_max_z - finger_parent_max_z + valid_idxs = th.where( + finger_pts[:, 2] > (finger_parent_max_z + finger_range * m.MIN_AG_DEFAULT_GRASP_POINT_PROP) + )[0] + finger_pts = finger_pts[valid_idxs] + # Make sure all points lie on a single side of the EEF's y-axis + y_signs = th.sign(finger_pts[:, 1]) + assert th.all( + y_signs == y_signs[0] + ).item(), "Expected all finger points to lie on single side of EEF y-axis!" + y_sign = y_signs[0].item() + y_abs_min = th.abs(finger_pts[:, 1]).min().item() + + # Compute the default grasping points for this finger + # For now, we only have a strong heuristic defined for parallel jaw grippers, which assumes that + # there are exactly 2 fingers + # In this case, the points are defined as the two (x,y,z) tuples where: + # z - the points at 20% and 80% along the range [finger_parent_max_z, finger_max_z] + # This is equivalent to inferring the length of the finger (lower bounded by the gripper base, + # assumed to be the parent link), and then taking the values MIN% and MAX% along its length + # y - the value closest to the edge of the finger surface in the direction of +/- EEF y-axis. + # This assumes that each individual finger lies completely on one side of the EEF y-axis + # x - 0. This assumes that each finger is symmetric about the EEF frame's x = 0 plane + # (x,y,z,1) -- homogeneous form for efficient transforming into the finger link frame + grasp_pts = th.tensor( + [ + [ + 0, + (y_abs_min - 0.002) * y_sign, + finger_parent_max_z + finger_range * m.MIN_AG_DEFAULT_GRASP_POINT_PROP, + 1, + ], + [ + 0, + (y_abs_min - 0.002) * y_sign, + finger_parent_max_z + finger_range * m.MAX_AG_DEFAULT_GRASP_POINT_PROP, + 1, + ], + ] + ) + # Convert the grasping points from the EEF frame -> finger frame + finger_to_world_tf = T.pose_inv(T.pose2mat(finger_link.get_position_orientation())) + finger_to_eef_tf = finger_to_world_tf @ world_to_eef_tf + grasp_pts = (grasp_pts @ finger_to_eef_tf.T)[:, :3] + grasping_points = [ + GraspingPoint(link_name=finger_link.body_name, position=grasp_pt) for grasp_pt in grasp_pts + ] + if i == 0: + # Start point + self._default_ag_start_points[arm] = grasping_points + else: + # End point + self._default_ag_end_points[arm] = grasping_points + + # For each grasping point, if we're in DEBUG mode, visualize with spheres + if gm.DEBUG: + for ag_points in (self.assisted_grasp_start_points, self.assisted_grasp_end_points): + for arm_ag_points in ag_points.values(): + # Skip if no points exist + if arm_ag_points is None: + continue + # For each ag point, generate a small sphere at that point + for i, arm_ag_point in enumerate(arm_ag_points): + link = self.links[arm_ag_point.link_name] + local_pos = arm_ag_point.position + vis_mesh_prim_path = f"{link.prim_path}/ag_point_{i}" + create_primitive_mesh( + prim_path=vis_mesh_prim_path, + extents=0.005, + primitive_type="Sphere", + ) + vis_geom = VisualGeomPrim( + relative_prim_path=absolute_prim_path_to_scene_relative( + scene=self.scene, + absolute_prim_path=vis_mesh_prim_path, + ), + name=f"ag_point_{i}", + ) + vis_geom.load(self.scene) + vis_geom.set_position_orientation( + position=local_pos, + frame="parent", + ) + def is_grasping(self, arm="default", candidate_obj=None): """ Returns True if the robot is grasping the target object @candidate_obj or any object if @candidate_obj is None. @@ -632,7 +816,9 @@ def eef_links(self): """ Returns: dict: Dictionary mapping arm appendage name to robot link corresponding to that arm's - eef link + eef link. NOTE: These links always have a canonical local orientation frame -- assuming a parallel jaw + eef morphology, it is assumed that the eef z-axis points out from the tips of the fingers, the y-axis + points from the left finger to the right finger, and the x-axis is inferred programmatically """ return {arm: self._links[self.eef_link_names[arm]] for arm in self.arm_names} @@ -654,6 +840,23 @@ def finger_joints(self): """ return {arm: [self._joints[joint] for joint in self.finger_joint_names[arm]] for arm in self.arm_names} + @property + def _assisted_grasp_start_points(self): + """ + Returns: + dict: Dictionary mapping individual arm appendage names to array of GraspingPoint tuples, + composed of (link_name, position) values specifying valid grasping start points located at + cartesian (x,y,z) coordinates specified in link_name's local coordinate frame. + These values will be used in conjunction with + @self.assisted_grasp_end_points to trigger assisted grasps, where objects that intersect + with any ray starting at any point in @self.assisted_grasp_start_points and terminating at any point in + @self.assisted_grasp_end_points will trigger an assisted grasp (calculated individually for each gripper + appendage). 
By default, each entry returns None, and must be implemented by any robot subclass that + wishes to use assisted grasping. + """ + # Should be optionally implemented by subclass + return None + @property def assisted_grasp_start_points(self): """ @@ -668,7 +871,28 @@ def assisted_grasp_start_points(self): appendage). By default, each entry returns None, and must be implemented by any robot subclass that wishes to use assisted grasping. """ - return {arm: None for arm in self.arm_names} + return ( + self._assisted_grasp_start_points + if self._assisted_grasp_start_points is not None + else self._default_ag_start_points + ) + + @property + def _assisted_grasp_end_points(self): + """ + Returns: + dict: Dictionary mapping individual arm appendage names to array of GraspingPoint tuples, + composed of (link_name, position) values specifying valid grasping end points located at + cartesian (x,y,z) coordinates specified in link_name's local coordinate frame. + These values will be used in conjunction with + @self.assisted_grasp_start_points to trigger assisted grasps, where objects that intersect + with any ray starting at any point in @self.assisted_grasp_start_points and terminating at any point in + @self.assisted_grasp_end_points will trigger an assisted grasp (calculated individually for each gripper + appendage). By default, each entry returns None, and must be implemented by any robot subclass that + wishes to use assisted grasping. + """ + # Should be optionally implemented by subclass + return None @property def assisted_grasp_end_points(self): @@ -684,16 +908,20 @@ def assisted_grasp_end_points(self): appendage). By default, each entry returns None, and must be implemented by any robot subclass that wishes to use assisted grasping. """ - return {arm: None for arm in self.arm_names} + return ( + self._assisted_grasp_end_points + if self._assisted_grasp_end_points is not None + else self._default_ag_end_points + ) @property - def finger_lengths(self): + def eef_to_fingertip_lengths(self): """ Returns: - dict: Dictionary mapping arm appendage name to corresponding length of the fingers in that - hand defined from the palm (assuming all fingers in one hand are equally long) + dict: Dictionary mapping arm appendage name to per-finger corresponding z-distance between EEF and each + respective fingertip """ - raise NotImplementedError + return self._eef_to_fingertip_lengths @property def arm_workspace_range(self): @@ -1020,7 +1248,7 @@ def _default_arm_joint_controller_configs(self): "command_output_limits": None, "motor_type": "position", "use_delta_commands": True, - "use_impedances": True, + "use_impedances": False, } return dic diff --git a/omnigibson/robots/mobile_manipulation_robot.py b/omnigibson/robots/mobile_manipulation_robot.py index 09ae8d7a2..f481d12c3 100644 --- a/omnigibson/robots/mobile_manipulation_robot.py +++ b/omnigibson/robots/mobile_manipulation_robot.py @@ -42,6 +42,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -81,6 +83,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! 
+ include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict @@ -118,6 +126,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, grasping_mode=grasping_mode, diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index b9ca8259e..2410325f1 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -4,7 +4,6 @@ from omnigibson.robots.articulated_trunk_robot import ArticulatedTrunkRobot from omnigibson.robots.holonomic_base_robot import HolonomicBaseRobot -from omnigibson.robots.manipulation_robot import GraspingPoint from omnigibson.robots.mobile_manipulation_robot import MobileManipulationRobot from omnigibson.utils.python_utils import classproperty @@ -34,6 +33,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -73,6 +74,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. 
See self._get_proprioception_dict @@ -106,6 +113,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, grasping_mode=grasping_mode, @@ -165,30 +174,6 @@ def untucked_default_joint_pos(self): pos[self.arm_control_idx[arm]] = th.tensor([0.0, 1.906, -0.991, 1.571, 0.915, -1.571]) return pos - @property - def finger_lengths(self): - return {self.default_arm: 0.087} - - @property - def assisted_grasp_start_points(self): - return { - arm: [ - GraspingPoint(link_name=f"{arm}_gripper_link1", position=th.tensor([-0.032, 0.0, -0.01])), - GraspingPoint(link_name=f"{arm}_gripper_link1", position=th.tensor([0.025, 0.0, -0.01])), - ] - for arm in self.arm_names - } - - @property - def assisted_grasp_end_points(self): - return { - arm: [ - GraspingPoint(link_name=f"{arm}_gripper_link2", position=th.tensor([-0.032, 0.0, -0.01])), - GraspingPoint(link_name=f"{arm}_gripper_link2", position=th.tensor([0.025, 0.0, -0.01])), - ] - for arm in self.arm_names - } - @cached_property def floor_touching_base_link_names(self): return ["wheel_link1", "wheel_link2", "wheel_link3"] diff --git a/omnigibson/robots/robot_base.py b/omnigibson/robots/robot_base.py index 1a925b8f5..2b55a3854 100644 --- a/omnigibson/robots/robot_base.py +++ b/omnigibson/robots/robot_base.py @@ -64,6 +64,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, **kwargs, @@ -99,6 +101,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. 
See self._get_proprioception_dict @@ -124,6 +132,8 @@ def __init__( abilities = robot_abilities if abilities is None else robot_abilities.update(abilities) # Initialize internal attributes that will be loaded later + self._include_sensor_names = None if include_sensor_names is None else set(include_sensor_names) + self._exclude_sensor_names = None if exclude_sensor_names is None else set(exclude_sensor_names) self._sensors = None # e.g.: scan sensor, vision sensor # All BaseRobots should have xform properties pre-loaded @@ -198,6 +208,25 @@ def _load_sensors(self): for prim in link.prim.GetChildren(): prim_type = prim.GetPrimTypeInfo().GetTypeName() if prim_type in SENSOR_PRIMS_TO_SENSOR_CLS: + # Possibly filter out the sensor based on name + prim_path = str(prim.GetPrimPath()) + not_blacklisted = self._exclude_sensor_names is None or not any( + name in prim_path for name in self._exclude_sensor_names + ) + whitelisted = self._include_sensor_names is None or any( + name in prim_path for name in self._include_sensor_names + ) + # Also make sure that the include / exclude sensor names are mutually exclusive + if self._exclude_sensor_names is not None and self._include_sensor_names is not None: + assert ( + len(set(self._exclude_sensor_names).intersection(set(self._include_sensor_names))) == 0 + ), ( + f"include_sensor_names and exclude_sensor_names must be mutually exclusive! " + f"Got: {self._include_sensor_names} and {self._exclude_sensor_names}" + ) + if not (not_blacklisted and whitelisted): + continue + # Infer what obs modalities to use for this sensor sensor_cls = SENSOR_PRIMS_TO_SENSOR_CLS[prim_type] sensor_kwargs = self._sensor_config[sensor_cls.__name__] @@ -210,11 +239,12 @@ def _load_sensors(self): # If the modalities list is empty, don't import the sensor. if not sensor_kwargs["modalities"]: continue + obs_modalities = obs_modalities.union(sensor_kwargs["modalities"]) # Create the sensor and store it internally sensor = create_sensor( sensor_type=prim_type, - relative_prim_path=absolute_prim_path_to_scene_relative(self.scene, str(prim.GetPrimPath())), + relative_prim_path=absolute_prim_path_to_scene_relative(self.scene, prim_path), name=f"{self.name}:{link_name}:{prim_type}:{sensor_counts[prim_type]}", **sensor_kwargs, ) @@ -298,11 +328,15 @@ def _get_proprioception_dict(self): dict: keyword-mapped proprioception observations available for this robot. 
Can be extended by subclasses """ - joint_positions = cb.to_torch(ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path)) - joint_velocities = cb.to_torch(ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path)) - joint_efforts = cb.to_torch(ControllableObjectViewAPI.get_joint_efforts(self.articulation_root_path)) + joint_positions = cb.to_torch( + cb.copy(ControllableObjectViewAPI.get_joint_positions(self.articulation_root_path)) + ) + joint_velocities = cb.to_torch( + cb.copy(ControllableObjectViewAPI.get_joint_velocities(self.articulation_root_path)) + ) + joint_efforts = cb.to_torch(cb.copy(ControllableObjectViewAPI.get_joint_efforts(self.articulation_root_path))) pos, quat = ControllableObjectViewAPI.get_position_orientation(self.articulation_root_path) - pos, quat = cb.to_torch(pos), cb.to_torch(quat) + pos, quat = cb.to_torch(cb.copy(pos)), cb.to_torch(cb.copy(quat)) ori = T.quat2euler(quat) ori_2d = T.z_angle_from_quat(quat) @@ -320,8 +354,12 @@ def _get_proprioception_dict(self): robot_2d_ori=ori_2d, robot_2d_ori_cos=th.cos(ori_2d), robot_2d_ori_sin=th.sin(ori_2d), - robot_lin_vel=cb.to_torch(ControllableObjectViewAPI.get_linear_velocity(self.articulation_root_path)), - robot_ang_vel=cb.to_torch(ControllableObjectViewAPI.get_angular_velocity(self.articulation_root_path)), + robot_lin_vel=cb.to_torch( + cb.copy(ControllableObjectViewAPI.get_linear_velocity(self.articulation_root_path)) + ), + robot_ang_vel=cb.to_torch( + cb.copy(ControllableObjectViewAPI.get_angular_velocity(self.articulation_root_path)) + ), ) def _load_observation_space(self): diff --git a/omnigibson/robots/stretch.py b/omnigibson/robots/stretch.py index 8ba67b4ad..1fdacaa8e 100644 --- a/omnigibson/robots/stretch.py +++ b/omnigibson/robots/stretch.py @@ -4,7 +4,7 @@ import torch as th from omnigibson.robots.active_camera_robot import ActiveCameraRobot -from omnigibson.robots.manipulation_robot import GraspingPoint, ManipulationRobot +from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.robots.two_wheel_robot import TwoWheelRobot from omnigibson.utils.ui_utils import create_module_logger @@ -54,28 +54,6 @@ def wheel_radius(self): def wheel_axle_length(self): return 0.330 - @property - def finger_lengths(self): - return {self.default_arm: 0.04} - - @property - def assisted_grasp_start_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="link_gripper_finger_right", position=th.tensor([0.013, 0.0, 0.01])), - GraspingPoint(link_name="link_gripper_finger_right", position=th.tensor([-0.01, 0.0, 0.009])), - ] - } - - @property - def assisted_grasp_end_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="link_gripper_finger_left", position=th.tensor([0.013, 0.0, 0.01])), - GraspingPoint(link_name="link_gripper_finger_left", position=th.tensor([-0.01, 0.0, 0.009])), - ] - } - @property def disabled_collision_pairs(self): return [ diff --git a/omnigibson/robots/tiago.py b/omnigibson/robots/tiago.py index 53a72ad07..4186ef359 100644 --- a/omnigibson/robots/tiago.py +++ b/omnigibson/robots/tiago.py @@ -5,7 +5,6 @@ from omnigibson.robots.active_camera_robot import ActiveCameraRobot from omnigibson.robots.articulated_trunk_robot import ArticulatedTrunkRobot from omnigibson.robots.holonomic_base_robot import HolonomicBaseRobot -from omnigibson.robots.manipulation_robot import GraspingPoint from omnigibson.robots.untucked_arm_pose_robot import UntuckedArmPoseRobot from omnigibson.utils.python_utils import classproperty 
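Review note on the sensor-name filtering added to `_load_sensors` in robot_base.py above (and threaded through every robot constructor in this diff): the include / exclude lists act as substring whitelist / blacklist tests against each sensor prim path. A minimal sketch of that predicate, with hypothetical standalone names:

```python
def sensor_passes_filters(prim_path, include_names=None, exclude_names=None):
    # A sensor is kept if it matches no exclude substring and, when an include
    # list is given, at least one include substring (mirrors _load_sensors above)
    not_blacklisted = exclude_names is None or not any(name in prim_path for name in exclude_names)
    whitelisted = include_names is None or any(name in prim_path for name in include_names)
    return not_blacklisted and whitelisted

assert sensor_passes_filters("/World/robot/head_camera", include_names=["head"])
assert not sensor_passes_filters("/World/robot/wrist_camera", exclude_names=["wrist"])
```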
@@ -36,6 +35,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -79,6 +80,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict @@ -121,6 +128,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, grasping_mode=grasping_mode, @@ -230,34 +239,6 @@ def _default_controllers(self): controllers["gripper_{}".format(arm)] = "MultiFingerGripperController" return controllers - @property - def assisted_grasp_start_points(self): - return { - arm: [ - GraspingPoint( - link_name="gripper_{}_right_finger_link".format(arm), position=th.tensor([-0.001, 0.0, -0.2]) - ), - GraspingPoint( - link_name="gripper_{}_right_finger_link".format(arm), position=th.tensor([-0.001, 0.0, -0.13]) - ), - ] - for arm in self.arm_names - } - - @property - def assisted_grasp_end_points(self): - return { - arm: [ - GraspingPoint( - link_name="gripper_{}_left_finger_link".format(arm), position=th.tensor([0.001, 0.0, -0.2]) - ), - GraspingPoint( - link_name="gripper_{}_left_finger_link".format(arm), position=th.tensor([0.001, 0.0, -0.13]) - ), - ] - for arm in self.arm_names - } - @property def arm_control_idx(self): # Add combined entry @@ -266,10 +247,6 @@ def arm_control_idx(self): idxs["combined"] = th.sort(th.cat([val for val in idxs.values()]))[0] return idxs - @property - def finger_lengths(self): - return {arm: 0.12 for arm in self.arm_names} - @property def disabled_collision_link_names(self): # These should NEVER have collisions in the first place (i.e.: these are poorly modeled geoms from the source diff --git a/omnigibson/robots/untucked_arm_pose_robot.py b/omnigibson/robots/untucked_arm_pose_robot.py index 4000969ce..cf7664d9e 100644 --- a/omnigibson/robots/untucked_arm_pose_robot.py +++ b/omnigibson/robots/untucked_arm_pose_robot.py @@ -39,6 +39,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -80,6 +82,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. 
Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict @@ -117,6 +125,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, grasping_mode=grasping_mode, diff --git a/omnigibson/robots/vx300s.py b/omnigibson/robots/vx300s.py index ac55c2b74..59905d287 100644 --- a/omnigibson/robots/vx300s.py +++ b/omnigibson/robots/vx300s.py @@ -3,7 +3,7 @@ import torch as th -from omnigibson.robots.manipulation_robot import GraspingPoint, ManipulationRobot +from omnigibson.robots.manipulation_robot import ManipulationRobot from omnigibson.utils.transform_utils import euler2quat @@ -34,6 +34,8 @@ def __init__( reset_joint_pos=None, # Unique to BaseRobot obs_modalities=("rgb", "proprio"), + include_sensor_names=None, + exclude_sensor_names=None, proprio_obs="default", sensor_config=None, # Unique to ManipulationRobot @@ -70,6 +72,12 @@ def __init__( Valid options are "all", or a list containing any subset of omnigibson.sensors.ALL_SENSOR_MODALITIES. Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will override any values specified from @obs_modalities! + include_sensor_names (None or list of str): If specified, substring(s) to check for in all raw sensor prim + paths found on the robot. A sensor must include one of the specified substrings in order to be included + in this robot's set of sensors + exclude_sensor_names (None or list of str): If specified, substring(s) to check against in all raw sensor + prim paths found on the robot. A sensor must not include any of the specified substrings in order to + be included in this robot's set of sensors proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive observations. If str, should be exactly "default" -- this results in the default proprioception observations being used, as defined by self.default_proprio_obs. 
See self._get_proprioception_dict @@ -100,6 +108,8 @@ def __init__( action_normalize=action_normalize, reset_joint_pos=reset_joint_pos, obs_modalities=obs_modalities, + include_sensor_names=include_sensor_names, + exclude_sensor_names=exclude_sensor_names, proprio_obs=proprio_obs, sensor_config=sensor_config, grasping_mode=grasping_mode, @@ -128,10 +138,6 @@ def _default_controllers(self): def _default_joint_pos(self): return th.tensor([0.0, -0.849879, 0.258767, 0.0, 1.2831712, 0.0, 0.057, 0.057]) - @property - def finger_lengths(self): - return {self.default_arm: 0.1} - @cached_property def arm_link_names(self): return { @@ -174,19 +180,3 @@ def finger_joint_names(self): @property def teleop_rotation_offset(self): return {self.default_arm: euler2quat([-math.pi, 0, 0])} - - @property - def assisted_grasp_start_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="right_finger_link", position=th.tensor([0.0, 0.001, 0.057])), - ] - } - - @property - def assisted_grasp_end_points(self): - return { - self.default_arm: [ - GraspingPoint(link_name="left_finger_link", position=th.tensor([0.0, 0.001, 0.057])), - ] - } diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py index d93211fd9..224ed6163 100644 --- a/omnigibson/simulator.py +++ b/omnigibson/simulator.py @@ -330,11 +330,17 @@ def __init__( self._on_contact ) # The callback will be called right *before* the physics step - self._physics_step_callback = self._physics_context._physx_interface.subscribe_physics_on_step_events( - lambda _: self._on_physics_step(), + self._pre_physics_step_callback = self._physics_context._physx_interface.subscribe_physics_on_step_events( + lambda _: self._on_pre_physics_step(), pre_step=True, order=0, ) + # The callback will be called right *after* the physics step + self._post_physics_step_callback = self._physics_context._physx_interface.subscribe_physics_on_step_events( + lambda _: self._on_post_physics_step(), + pre_step=False, + order=0, + ) self._simulation_event_callback = ( self._physx_interface.get_simulation_event_stream_v2().create_subscription_to_pop( self._on_simulation_event @@ -997,7 +1003,6 @@ def _omni_update_step(self): # Clear the bounding box and contact caches so that they get updated during the next time they're called RigidContactAPI.clear() GripperRigidContactAPI.clear() - ControllableObjectViewAPI.clear() def play(self): if not self.is_playing(): @@ -1120,10 +1125,7 @@ def step_physics(self): self._omni_update_step() PoseAPI.invalidate() - def _on_physics_step(self): - # Make the controllable object view API refresh - ControllableObjectViewAPI.clear() - + def _on_pre_physics_step(self): # Run the controller step on every controllable object for scene in self.scenes: for obj in scene.objects: @@ -1133,6 +1135,10 @@ def _on_physics_step(self): # Flush the controls from the ControllableObjectViewAPI ControllableObjectViewAPI.flush_control() + def _on_post_physics_step(self): + # Run the post physics update for backend view + ControllableObjectViewAPI.post_physics_step() + def _on_contact(self, contact_headers, contact_data): """ This callback will be invoked after every PHYSICS step if there is any contact. 
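Review note on the simulator changes above: the single physics callback is split into a pre-step subscription (controller stepping and control flushing) and a post-step subscription (`ControllableObjectViewAPI.post_physics_step()`), which snapshots root/link transforms and joint positions right after each step. That history backs the `estimate=True` velocity paths added to usd_utils.py further down, and the reuse of those cached buffers across steps is also why reads are now wrapped in `cb.copy(...)` throughout this diff. A minimal sketch of the finite-difference estimate, with hypothetical names:

```python
import torch as th

class FiniteDifferenceVelocity:
    """Sketch: estimate velocity as (pose_now - pose_prev) / dt across physics steps."""

    def __init__(self, dt):
        self._dt = dt
        self._last_pos = None

    def post_physics_step(self, positions):
        # Called right after each physics step, mirroring the new callback ordering
        estimate = None if self._last_pos is None else (positions - self._last_pos) / self._dt
        self._last_pos = positions.clone()
        return estimate

estimator = FiniteDifferenceVelocity(dt=1 / 120)
assert estimator.post_physics_step(th.zeros(3)) is None          # no history yet
print(estimator.post_physics_step(th.tensor([0.01, 0.0, 0.0])))  # ~[1.2, 0, 0] m/s
```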
diff --git a/omnigibson/utils/backend_utils.py b/omnigibson/utils/backend_utils.py index dfdf8a32d..a4d89f988 100644 --- a/omnigibson/utils/backend_utils.py +++ b/omnigibson/utils/backend_utils.py @@ -46,6 +46,7 @@ class _ComputeBackend: all = None abs = None sqrt = None + norm = None mean = None copy = None eye = None @@ -96,7 +97,7 @@ class _ComputeTorchBackend(_ComputeBackend): zeros = lambda *args: th.zeros(*args, dtype=th.float32) ones = lambda *args: th.ones(*args, dtype=th.float32) to_numpy = lambda x: x.numpy() - from_numpy = lambda x: th.from_numpy() + from_numpy = lambda x: th.from_numpy(x) to_torch = lambda x: x from_torch = lambda x: x from_torch_recursive = lambda dic: dic @@ -111,6 +112,7 @@ class _ComputeTorchBackend(_ComputeBackend): all = th.all abs = th.abs sqrt = th.sqrt + norm = lambda val, dim=None, keepdim=False: th.norm(val, dim=dim, keepdim=keepdim) mean = lambda val, dim=None, keepdim=False: th.mean(val, dim=dim, keepdim=keepdim) copy = lambda arr: arr.clone() eye = th.eye @@ -144,8 +146,9 @@ class _ComputeNumpyBackend(_ComputeBackend): all = np.all abs = np.abs sqrt = np.sqrt + norm = lambda val, dim=None, keepdim=False: np.linalg.norm(val, axis=dim, keepdims=keepdim) mean = lambda val, dim=None, keepdim=False: np.mean(val, axis=dim, keepdims=keepdim) - copy = lambda arr: np.array(arr) + copy = lambda arr: arr.copy() eye = np.eye view = lambda arr, shape: arr.reshape(shape) arange = np.arange diff --git a/omnigibson/utils/grasping_planning_utils.py b/omnigibson/utils/grasping_planning_utils.py index 726706841..db59225f0 100644 --- a/omnigibson/utils/grasping_planning_utils.py +++ b/omnigibson/utils/grasping_planning_utils.py @@ -231,7 +231,10 @@ def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint ) # Now apply the grasp offset. - dist_from_grasp_pos = robot.finger_lengths[robot.default_arm] + 0.05 + avg_finger_offset = th.mean( + th.tensor([length for length in robot.eef_to_fingertip_lengths[robot.default_arm].values()]) + ) + dist_from_grasp_pos = avg_finger_offset + 0.05 offset_grasp_pose_in_bbox_frame = ( grasp_position_in_bbox_frame + canonical_push_axis * push_axis_closer_side_sign * dist_from_grasp_pos, grasp_quat_in_bbox_frame, @@ -263,14 +266,16 @@ def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint waypoint_start_offset = ( -0.05 * approach_direction_in_world_frame if should_open else 0.05 * approach_direction_in_world_frame ) + avg_finger_offset = th.mean( + th.tensor([length for length in robot.eef_to_fingertip_lengths[robot.default_arm].values()]) + ) waypoint_start_pose = ( grasp_pose_in_world_frame[0] - + -1 * approach_direction_in_world_frame * (robot.finger_lengths[robot.default_arm] + waypoint_start_offset), + + -1 * approach_direction_in_world_frame * (avg_finger_offset + waypoint_start_offset), grasp_pose_in_world_frame[1], ) waypoint_end_pose = ( - target_hand_pose_in_world_frame[0] - + -1 * approach_direction_in_world_frame * (robot.finger_lengths[robot.default_arm]), + target_hand_pose_in_world_frame[0] + -1 * approach_direction_in_world_frame * avg_finger_offset, target_hand_pose_in_world_frame[1], ) waypoints = interpolate_waypoints(waypoint_start_pose, waypoint_end_pose, num_waypoints=num_waypoints) @@ -409,7 +414,10 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint, ) # Now apply the grasp offset. 
- dist_from_grasp_pos = robot.finger_lengths[robot.default_arm] + 0.05 + avg_finger_offset = th.mean( + th.tensor([length for length in robot.eef_to_fingertip_lengths[robot.default_arm].values()]) + ) + dist_from_grasp_pos = avg_finger_offset + 0.05 offset_in_bbox_frame = canonical_open_direction * open_axis_closer_side_sign * dist_from_grasp_pos offset_grasp_pose_in_bbox_frame = (grasp_position + offset_in_bbox_frame, grasp_quat_in_bbox_frame) offset_grasp_pose_in_world_frame = T.pose_transform( diff --git a/omnigibson/utils/processing_utils.py b/omnigibson/utils/processing_utils.py index bd81ceba8..5acc088a7 100644 --- a/omnigibson/utils/processing_utils.py +++ b/omnigibson/utils/processing_utils.py @@ -116,7 +116,7 @@ def _dump_state(self): state = super()._dump_state() # Add info from this filter - state["past_samples"] = cb.to_torch(self.past_samples) + state["past_samples"] = cb.to_torch(cb.copy(self.past_samples)) state["current_idx"] = self.current_idx state["fully_filled"] = self.fully_filled @@ -127,7 +127,7 @@ def _load_state(self, state): super()._load_state(state=state) # Load relevant info for this filter - self.past_samples = cb.from_torch(state["past_samples"]) + self.past_samples = cb.copy(cb.from_torch(state["past_samples"])) self.current_idx = state["current_idx"] self.fully_filled = state["fully_filled"] diff --git a/omnigibson/utils/transform_utils_np.py b/omnigibson/utils/transform_utils_np.py index 12e4a9a40..7fb7e33c1 100644 --- a/omnigibson/utils/transform_utils_np.py +++ b/omnigibson/utils/transform_utils_np.py @@ -158,8 +158,16 @@ def convert_quat(q, to="xyzw"): raise Exception("convert_quat: choose a valid `to` argument (xyzw or wxyz)") -@jit(nopython=True) def quat_multiply(quaternion1, quaternion0): + if quaternion1.dtype != np.float32: + quaternion1 = quaternion1.astype(np.float32) + if quaternion0.dtype != np.float32: + quaternion0 = quaternion0.astype(np.float32) + return _quat_multiply(quaternion1, quaternion0) + + +@jit(nopython=True) +def _quat_multiply(quaternion1, quaternion0): """ Return multiplication of two quaternions (q1 * q0). 
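Usage note on the `quat_multiply` rewrite above: moving from scalar unpacking to `np.split(..., 4, axis=-1)` plus `np.concatenate` makes the product batch-aware while preserving the unbatched `(4,)` call path, since each split piece keeps a trailing singleton dimension. A small sketch, assuming the module is importable as named in this diff:

```python
import numpy as np
from omnigibson.utils.transform_utils_np import quat_multiply

q0 = np.random.randn(5, 4).astype(np.float32)  # batch of (x, y, z, w) quaternions
q1 = np.random.randn(5, 4).astype(np.float32)

batched = quat_multiply(q1, q0)       # shape (5, 4): one product per row
single = quat_multiply(q1[0], q0[0])  # shape (4,): unbatched inputs still work
assert np.allclose(batched[0], single)
```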
@@ -175,16 +183,16 @@ def quat_multiply(quaternion1, quaternion0): Returns: np.array: (x,y,z,w) multiplied quaternion """ - x0, y0, z0, w0 = quaternion0 - x1, y1, z1, w1 = quaternion1 - return np.array( + x0, y0, z0, w0 = np.split(quaternion0, 4, axis=-1) + x1, y1, z1, w1 = np.split(quaternion1, 4, axis=-1) + return np.concatenate( ( x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0, -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, ), - dtype=np.float32, + axis=-1, ) @@ -204,10 +212,9 @@ def quat_conjugate(quaternion): Returns: np.array: (x,y,z,w) quaternion conjugate """ - return np.array( - (-quaternion[0], -quaternion[1], -quaternion[2], quaternion[3]), - dtype=np.float32, - ) + n_dims = len(quaternion.shape) + # Reshape to explicitly handle batched calls + return quaternion * np.array([-1.0, -1.0, -1.0, 1.0], dtype=np.float32).reshape([1] * (n_dims - 1) + [4]) def quat_inverse(quaternion): @@ -226,7 +233,7 @@ def quat_inverse(quaternion): Returns: np.array: (x,y,z,w) quaternion inverse """ - return quat_conjugate(quaternion) / np.dot(quaternion, quaternion) + return quat_conjugate(quaternion) / np.sum(quaternion * quaternion, axis=-1, keepdims=True) def quat_distance(quaternion1, quaternion0): diff --git a/omnigibson/utils/ui_utils.py b/omnigibson/utils/ui_utils.py index 8df735478..9b96898eb 100644 --- a/omnigibson/utils/ui_utils.py +++ b/omnigibson/utils/ui_utils.py @@ -659,18 +659,18 @@ def generate_ik_keypress_mapping(self, controller_info): """ mapping = {} - mapping[lazy.carb.input.KeyboardInput.UP] = {"idx": controller_info["start_idx"] + 0, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.DOWN] = {"idx": controller_info["start_idx"] + 0, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.RIGHT] = {"idx": controller_info["start_idx"] + 1, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.LEFT] = {"idx": controller_info["start_idx"] + 1, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.P] = {"idx": controller_info["start_idx"] + 2, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.SEMICOLON] = {"idx": controller_info["start_idx"] + 2, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.N] = {"idx": controller_info["start_idx"] + 3, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.B] = {"idx": controller_info["start_idx"] + 3, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.O] = {"idx": controller_info["start_idx"] + 4, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.U] = {"idx": controller_info["start_idx"] + 4, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.V] = {"idx": controller_info["start_idx"] + 5, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.C] = {"idx": controller_info["start_idx"] + 5, "val": -0.5} + mapping[lazy.carb.input.KeyboardInput.UP] = {"idx": controller_info["start_idx"] + 0, "val": 0.1} + mapping[lazy.carb.input.KeyboardInput.DOWN] = {"idx": controller_info["start_idx"] + 0, "val": -0.1} + mapping[lazy.carb.input.KeyboardInput.RIGHT] = {"idx": controller_info["start_idx"] + 1, "val": -0.1} + mapping[lazy.carb.input.KeyboardInput.LEFT] = {"idx": controller_info["start_idx"] + 1, "val": 0.1} + mapping[lazy.carb.input.KeyboardInput.P] = {"idx": controller_info["start_idx"] + 2, "val": 0.1} + mapping[lazy.carb.input.KeyboardInput.SEMICOLON] = {"idx": controller_info["start_idx"] + 2, "val": -0.1} + mapping[lazy.carb.input.KeyboardInput.N] = {"idx": controller_info["start_idx"] + 3, "val": 0.1} + mapping[lazy.carb.input.KeyboardInput.B] = {"idx": 
controller_info["start_idx"] + 3, "val": -0.1} + mapping[lazy.carb.input.KeyboardInput.O] = {"idx": controller_info["start_idx"] + 4, "val": 0.1} + mapping[lazy.carb.input.KeyboardInput.U] = {"idx": controller_info["start_idx"] + 4, "val": -0.1} + mapping[lazy.carb.input.KeyboardInput.V] = {"idx": controller_info["start_idx"] + 5, "val": 0.1} + mapping[lazy.carb.input.KeyboardInput.C] = {"idx": controller_info["start_idx"] + 5, "val": -0.1} return mapping @@ -687,18 +687,18 @@ def generate_osc_keypress_mapping(self, controller_info): """ mapping = {} - mapping[lazy.carb.input.KeyboardInput.UP] = {"idx": controller_info["start_idx"] + 0, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.DOWN] = {"idx": controller_info["start_idx"] + 0, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.RIGHT] = {"idx": controller_info["start_idx"] + 1, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.LEFT] = {"idx": controller_info["start_idx"] + 1, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.P] = {"idx": controller_info["start_idx"] + 2, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.SEMICOLON] = {"idx": controller_info["start_idx"] + 2, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.N] = {"idx": controller_info["start_idx"] + 3, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.B] = {"idx": controller_info["start_idx"] + 3, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.O] = {"idx": controller_info["start_idx"] + 4, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.U] = {"idx": controller_info["start_idx"] + 4, "val": -0.5} - mapping[lazy.carb.input.KeyboardInput.V] = {"idx": controller_info["start_idx"] + 5, "val": 0.5} - mapping[lazy.carb.input.KeyboardInput.C] = {"idx": controller_info["start_idx"] + 5, "val": -0.5} + mapping[lazy.carb.input.KeyboardInput.UP] = {"idx": controller_info["start_idx"] + 0, "val": 0.25} + mapping[lazy.carb.input.KeyboardInput.DOWN] = {"idx": controller_info["start_idx"] + 0, "val": -0.25} + mapping[lazy.carb.input.KeyboardInput.RIGHT] = {"idx": controller_info["start_idx"] + 1, "val": -0.25} + mapping[lazy.carb.input.KeyboardInput.LEFT] = {"idx": controller_info["start_idx"] + 1, "val": 0.25} + mapping[lazy.carb.input.KeyboardInput.P] = {"idx": controller_info["start_idx"] + 2, "val": 0.25} + mapping[lazy.carb.input.KeyboardInput.SEMICOLON] = {"idx": controller_info["start_idx"] + 2, "val": -0.25} + mapping[lazy.carb.input.KeyboardInput.N] = {"idx": controller_info["start_idx"] + 3, "val": 0.25} + mapping[lazy.carb.input.KeyboardInput.B] = {"idx": controller_info["start_idx"] + 3, "val": -0.25} + mapping[lazy.carb.input.KeyboardInput.O] = {"idx": controller_info["start_idx"] + 4, "val": 0.25} + mapping[lazy.carb.input.KeyboardInput.U] = {"idx": controller_info["start_idx"] + 4, "val": -0.25} + mapping[lazy.carb.input.KeyboardInput.V] = {"idx": controller_info["start_idx"] + 5, "val": 0.25} + mapping[lazy.carb.input.KeyboardInput.C] = {"idx": controller_info["start_idx"] + 5, "val": -0.25} return mapping diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py index ac7e568b8..f9d9a84d3 100644 --- a/omnigibson/utils/usd_utils.py +++ b/omnigibson/utils/usd_utils.py @@ -850,10 +850,45 @@ def __init__(self, pattern): # Mapping from prim path to base footprint link name if one exists, None if the root is the base link. 
diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py
index ac7e568b8..f9d9a84d3 100644
--- a/omnigibson/utils/usd_utils.py
+++ b/omnigibson/utils/usd_utils.py
@@ -850,10 +850,45 @@ def __init__(self, pattern):
         # Mapping from prim path to base footprint link name if one exists, None if the root is the base link.
         self._base_footprint_link_names = {}
 
-    def clear(self):
+        # Prior link transforms / dof positions for estimating velocities since Omni gives inaccurate values
+        self._last_state = None
+
+    def post_physics_step(self):
+        # Should be called every sim physics step, right after a new physics step occurs
+        # The current poses (if they exist) are now the former poses from the previous timestep
+        # These values are needed to compute velocity estimates
+        if (
+            "root_transforms" in self._read_cache
+            and "link_transforms" in self._read_cache
+            and "dof_positions" in self._read_cache
+        ):
+            self._last_state = {
+                "root_transforms": cb.copy(self._read_cache["root_transforms"]),
+                "link_transforms": cb.copy(self._read_cache["link_transforms"]),
+                "dof_positions": cb.copy(self._read_cache["dof_positions"]),
+            }
+        else:
+            # We don't have enough info to populate the history, so simply clear it instead
+            self._last_state = None
+
+        # Clear the internal data since everything is outdated
+        self.clear(keep_last_pose=True)
+
+    def clear(self, keep_last_pose=False):
         self._read_cache = {}
         self._write_idx_cache = collections.defaultdict(set)
 
+        # Clear our last timestep's cached values by default
+        if not keep_last_pose:
+            self._last_state = None
+
+        # Cache the (now current) transforms so that they're guaranteed to exist throughout the duration of this
+        # timestep, and available for caching during the next timestep's post_physics_step() call
+        if og.sim.is_playing():
+            self._read_cache["root_transforms"] = cb.from_torch(self._view.get_root_transforms())
+            self._read_cache["link_transforms"] = cb.from_torch(self._view.get_link_transforms())
+            self._read_cache["dof_positions"] = cb.from_torch(self._view.get_dof_positions())
+
     def _set_dof_position_targets(self, data, indices, cast=True):
         # No casting results in better efficiency
         if cast:
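
# Illustrative sketch (not part of the patch): the cache lifecycle the new
# methods assume, written against the public classmethods added further below.
# The surrounding env/sim setup is elided, so this is pseudocode of the step
# order, not a runnable script.
#
#   ControllableObjectViewAPI.clear()              # snapshots current root/link
#                                                  # transforms + dof positions
#   og.sim.step_physics()                          # state advances
#   ControllableObjectViewAPI.post_physics_step()  # snapshot -> _last_state;
#                                                  # stale read cache cleared
#   ControllableObjectViewAPI.get_joint_velocities(prim_path, estimate=True)
#                                                  # -> (curr dofs - last dofs) / dt
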
self._read_cache["link_velocities"][idx, :] - vels[-1, :, 0] = self._get_velocities(prim_path=prim_path) + vels[:-1, :, 0] = self._read_cache[f"link_{vel_str}"][idx, :] + vels[-1, :, 0] = self._get_velocities(prim_path=prim_path, estimate=estimate) tf = cb.zeros((1, 6, 6)) orn_t = cb.T.quat2mat(self.get_position_orientation(prim_path)[1]).T @@ -1032,30 +1072,48 @@ def _get_relative_velocities(self, prim_path): tf[0, 3:, 3:] = orn_t # x.T --> transpose (inverse) orientation # (1, 6, 6) @ (n_links, 6, 1) -> (n_links, 6, 1) -> (n_links, 6) - self._read_cache["relative_velocities"][prim_path] = cb.squeeze(tf @ vels, dim=-1) + self._read_cache[f"relative_{vel_str}"][prim_path] = cb.squeeze(tf @ vels, dim=-1) + + return self._read_cache[f"relative_{vel_str}"][prim_path] + + def get_linear_velocity(self, prim_path, estimate=False): + return self._get_velocities(prim_path, estimate=estimate)[:3] - return self._read_cache["relative_velocities"][prim_path] + def get_angular_velocity(self, prim_path, estimate=False): + return self._get_velocities(prim_path, estimate=estimate)[3:] - def get_linear_velocity(self, prim_path): - return self._get_velocities(prim_path)[:3] + def _get_root_velocities(self, prim_path, estimate=False): + vel_str = "velocities_estimate" if estimate else "velocities" - def get_angular_velocity(self, prim_path): - return self._get_velocities(prim_path)[3:] + # Use estimated calculation if requested and we have prior history info + if f"root_{vel_str}" not in self._read_cache: + if estimate and self._last_state is not None: + # Compute root velocities estimate as delta between prior timestep and current timestep + vels = cb.zeros((self._last_state["root_transforms"].shape[0], 6)) - def _get_root_velocities(self, prim_path): - if "root_velocities" not in self._read_cache: - self._read_cache["root_velocities"] = cb.from_torch(self._view.get_root_velocities()) + if "root_transforms" not in self._read_cache: + self._read_cache["root_transforms"] = cb.from_torch(self._view.get_root_transforms()) + + vels[:, :3] = self._read_cache["root_transforms"][:, :3] - self._last_state["root_transforms"][:, :3] + vels[:, 3:] = cb.T.quat2axisangle( + cb.T.quat_distance( + self._read_cache["root_transforms"][:, 3:], self._last_state["root_transforms"][:, 3:] + ) + ) + self._read_cache[f"root_{vel_str}"] = vels / og.sim.get_physics_dt() + else: + self._read_cache[f"root_{vel_str}"] = cb.from_torch(self._view.get_root_velocities()) idx = self._idx[prim_path] - return self._read_cache["root_velocities"][idx] + return self._read_cache[f"root_{vel_str}"][idx] - def get_relative_linear_velocity(self, prim_path): + def get_relative_linear_velocity(self, prim_path, estimate=False): # base corresponds to final index - return self._get_relative_velocities(prim_path)[-1, :3] + return self._get_relative_velocities(prim_path, estimate=estimate)[-1, :3] - def get_relative_angular_velocity(self, prim_path): + def get_relative_angular_velocity(self, prim_path, estimate=False): # base corresponds to final index - return self._get_relative_velocities(prim_path)[-1, 3:] + return self._get_relative_velocities(prim_path, estimate=estimate)[-1, 3:] def get_joint_positions(self, prim_path): if "dof_positions" not in self._read_cache: @@ -1064,12 +1122,22 @@ def get_joint_positions(self, prim_path): idx = self._idx[prim_path] return self._read_cache["dof_positions"][idx] - def get_joint_velocities(self, prim_path): - if "dof_velocities" not in self._read_cache: - self._read_cache["dof_velocities"] = 
@@ -1064,12 +1122,22 @@ def get_joint_positions(self, prim_path):
         idx = self._idx[prim_path]
         return self._read_cache["dof_positions"][idx]
 
-    def get_joint_velocities(self, prim_path):
-        if "dof_velocities" not in self._read_cache:
-            self._read_cache["dof_velocities"] = cb.from_torch(self._view.get_dof_velocities())
+    def get_joint_velocities(self, prim_path, estimate=False):
+        vel_str = "velocities_estimate" if estimate else "velocities"
+
+        # Use estimated calculation if requested and we have prior history info
+        if f"dof_{vel_str}" not in self._read_cache:
+            if estimate and self._last_state is not None:
+                if "dof_positions" not in self._read_cache:
+                    self._read_cache["dof_positions"] = cb.from_torch(self._view.get_dof_positions())
+                self._read_cache[f"dof_{vel_str}"] = (
+                    self._read_cache["dof_positions"] - self._last_state["dof_positions"]
+                ) / og.sim.get_physics_dt()
+            else:
+                self._read_cache[f"dof_{vel_str}"] = cb.from_torch(self._view.get_dof_velocities())
 
         idx = self._idx[prim_path]
-        return self._read_cache["dof_velocities"][idx]
+        return self._read_cache[f"dof_{vel_str}"][idx]
 
     def get_joint_efforts(self, prim_path):
         if "dof_projected_joint_forces" not in self._read_cache:
@@ -1135,31 +1203,58 @@ def get_link_relative_position_orientation(self, prim_path, link_name):
         rel_pose = self._get_relative_poses(prim_path)[link_idx]
         return rel_pose[:3], rel_pose[3:]
 
-    def _get_link_velocities(self, prim_path, link_name):
-        if "link_velocities" not in self._read_cache:
-            self._read_cache["link_velocities"] = cb.from_torch(self._view.get_link_velocities())
+    def _get_link_velocities(self, prim_path, link_name, estimate=False):
+        vel_str = "velocities_estimate" if estimate else "velocities"
+
+        # Use estimated calculation if requested and we have prior history info
+        if f"link_{vel_str}" not in self._read_cache:
+            if estimate and self._last_state is not None:
+                # Compute link velocities estimate as delta between prior timestep and current timestep
+                N, L, _ = self._last_state["link_transforms"].shape
+                vels = cb.zeros((N, L, 6))
+
+                if "link_transforms" not in self._read_cache:
+                    self._read_cache["link_transforms"] = cb.from_torch(self._view.get_link_transforms())
+
+                vels[:, :, :3] = (
+                    self._read_cache["link_transforms"][:, :, :3] - self._last_state["link_transforms"][:, :, :3]
+                )
+                vels[:, :, 3:] = cb.view(
+                    cb.T.quat2axisangle(
+                        cb.T.quat_distance(
+                            cb.view(self._read_cache["link_transforms"][:, :, 3:], (-1, 4)),
+                            cb.view(self._last_state["link_transforms"][:, :, 3:], (-1, 4)),
+                        )
+                    ),
+                    (N, L, 3),
+                )
+                self._read_cache[f"link_{vel_str}"] = vels / og.sim.get_physics_dt()
+
+            # Otherwise, directly grab velocities
+            else:
+                self._read_cache[f"link_{vel_str}"] = cb.from_torch(self._view.get_link_velocities())
 
         idx = self._idx[prim_path]
         link_idx = self._link_idx[idx][link_name]
-        vel = self._read_cache["link_velocities"][idx, link_idx]
+        vel = self._read_cache[f"link_{vel_str}"][idx, link_idx]
 
         return vel
 
-    def get_link_linear_velocity(self, prim_path, link_name):
-        return self._get_link_velocities(prim_path, link_name)[:3]
+    def get_link_linear_velocity(self, prim_path, link_name, estimate=False):
+        return self._get_link_velocities(prim_path, link_name, estimate=estimate)[:3]
 
-    def get_link_relative_linear_velocity(self, prim_path, link_name):
+    def get_link_relative_linear_velocity(self, prim_path, link_name, estimate=False):
         idx = self._idx[prim_path]
         link_idx = self._link_idx[idx][link_name]
-        return self._get_relative_velocities(prim_path)[link_idx, :3]
+        return self._get_relative_velocities(prim_path, estimate=estimate)[link_idx, :3]
 
-    def get_link_angular_velocity(self, prim_path, link_name):
-        return self._get_link_velocities(prim_path, link_name)[3:]
+    def get_link_angular_velocity(self, prim_path, link_name, estimate=False):
+        return self._get_link_velocities(prim_path, link_name, estimate=estimate)[3:]
 
-    def get_link_relative_angular_velocity(self, prim_path, link_name):
+    def get_link_relative_angular_velocity(self, prim_path, link_name, estimate=False):
         idx = self._idx[prim_path]
         link_idx = self._link_idx[idx][link_name]
-        return self._get_relative_velocities(prim_path)[link_idx, 3:]
+        return self._get_relative_velocities(prim_path, estimate=estimate)[link_idx, 3:]
 
     def get_jacobian(self, prim_path):
         if "jacobians" not in self._read_cache:
@@ -1206,11 +1301,21 @@ class ControllableObjectViewAPI:
     # Dictionary mapping from pattern to BatchControlViewAPIImpl
     _VIEWS_BY_PATTERN = {}
 
+    @classmethod
+    def post_physics_step(cls):
+        for view in cls._VIEWS_BY_PATTERN.values():
+            view.post_physics_step()
+
     @classmethod
     def clear(cls):
         for view in cls._VIEWS_BY_PATTERN.values():
             view.clear()
 
+    @classmethod
+    def clear_object(cls, prim_path):
+        if cls._get_pattern_from_prim_path(prim_path) in cls._VIEWS_BY_PATTERN:
+            cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].clear()
+
     @classmethod
     def flush_control(cls):
         for view in cls._VIEWS_BY_PATTERN.values():
@@ -1302,21 +1407,28 @@ def get_root_position_orientation(cls, prim_path):
         return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_root_transform(prim_path)
 
     @classmethod
-    def get_linear_velocity(cls, prim_path):
-        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_linear_velocity(prim_path)
+    def get_linear_velocity(cls, prim_path, estimate=False):
+        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_linear_velocity(
+            prim_path, estimate=estimate
+        )
 
     @classmethod
-    def get_angular_velocity(cls, prim_path):
-        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_angular_velocity(prim_path)
+    def get_angular_velocity(cls, prim_path, estimate=False):
+        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_angular_velocity(
+            prim_path, estimate=estimate
+        )
 
     @classmethod
-    def get_relative_linear_velocity(cls, prim_path):
-        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_relative_linear_velocity(prim_path)
+    def get_relative_linear_velocity(cls, prim_path, estimate=False):
+        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_relative_linear_velocity(
+            prim_path, estimate=estimate
+        )
 
     @classmethod
-    def get_relative_angular_velocity(cls, prim_path):
+    def get_relative_angular_velocity(cls, prim_path, estimate=False):
         return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_relative_angular_velocity(
-            prim_path
+            prim_path,
+            estimate=estimate,
         )
 
     @classmethod
@@ -1324,8 +1436,10 @@ def get_joint_positions(cls, prim_path):
         return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_joint_positions(prim_path)
 
     @classmethod
-    def get_joint_velocities(cls, prim_path):
-        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_joint_velocities(prim_path)
+    def get_joint_velocities(cls, prim_path, estimate=False):
+        return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_joint_velocities(
+            prim_path, estimate=estimate
+        )
 
     @classmethod
     def get_joint_efforts(cls, prim_path):
@@ -1360,15 +1474,19 @@ def get_link_relative_position_orientation(cls, prim_path, link_name):
         )
 
     @classmethod
-    def get_link_relative_linear_velocity(cls, prim_path, link_name):
+    def get_link_relative_linear_velocity(cls, prim_path, link_name, estimate=False):
         return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_link_relative_linear_velocity(
-            prim_path, link_name
+            prim_path,
+            link_name,
+            estimate=estimate,
        )
 
     @classmethod
-    def get_link_relative_angular_velocity(cls, prim_path, link_name):
+    def get_link_relative_angular_velocity(cls, prim_path, link_name, estimate=False):
         return cls._VIEWS_BY_PATTERN[cls._get_pattern_from_prim_path(prim_path)].get_link_relative_angular_velocity(
-            prim_path, link_name
+            prim_path,
+            link_name,
+            estimate=estimate,
        )
 
     @classmethod
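
# Illustrative sketch (not part of the patch): a minimal, self-contained
# version of the finite-difference estimate the new estimate=True paths
# compute -- linear velocity from the position delta, angular velocity from
# the axis-angle of the relative rotation, both divided by the physics
# timestep. (Standalone torch version; the real code routes through
# cb.T.quat_distance / cb.T.quat2axisangle and assumes unit quaternions.)
import torch as th

def estimate_twist(pos_prev, quat_prev, pos_curr, quat_curr, dt):
    lin_vel = (pos_curr - pos_prev) / dt
    # Relative rotation q_rel = q_curr * conj(q_prev), (x, y, z, w) convention
    x0, y0, z0, w0 = -quat_prev[0], -quat_prev[1], -quat_prev[2], quat_prev[3]
    x1, y1, z1, w1 = quat_curr
    q_rel = th.stack([
        x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
        -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
        x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0,
        -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
    ])
    # Axis-angle of the shortest rotation; note a finite-difference estimate
    # like this wraps for rotations larger than pi within a single step
    angle = 2.0 * th.acos(th.clamp(q_rel[3].abs(), max=1.0))
    axis = q_rel[:3] * th.sign(q_rel[3]) / th.clamp(q_rel[:3].norm(), min=1e-9)
    ang_vel = axis * angle / dt
    return lin_vel, ang_vel

# e.g. lin, ang = estimate_twist(p0, q0, p1, q1, og.sim.get_physics_dt())
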
diff --git a/tests/test_controllers.py b/tests/test_controllers.py
index fbb4130c4..f0ce52e39 100644
--- a/tests/test_controllers.py
+++ b/tests/test_controllers.py
@@ -149,11 +149,11 @@ def check_ori_error(curr_orientation, init_orientation, tol=0.1):
 
     n_steps = {
         "pose_delta_ori": {
-            "zero": 10,
-            "forward": 10,
-            "side": 10,
-            "up": 10,
-            "rotate": 10,
+            "zero": 40,
+            "forward": 40,
+            "side": 40,
+            "up": 40,
+            "rotate": 20,
             "base_move": 30,
         },
         "absolute_pose": {
@@ -181,10 +181,15 @@ def check_ori_error(curr_orientation, init_orientation, tol=0.1):
         for robot in env.robots:
             robot.keep_still()
 
+        # We need to explicitly reset the controllers to unify the initial state that will be seen
+        # during downstream action executions -- i.e.: the state seen after robot.reload_controllers()
+        # is called each time
+        for controller in robot.controllers.values():
+            controller.reset()
+
         # Update initial state (robot should be stable and still)
         env.scene.update_initial_state()
 
-        # Reset the environment
         env.scene.reset()
 
         # Record initial eef pose of all robots
@@ -233,10 +238,10 @@ def check_ori_error(curr_orientation, init_orientation, tol=0.1):
                     break
                 start_idx += robot.controllers[c].command_dim
             if controller_mode == "pose_delta_ori":
-                forward_action[start_idx] = 0.1
-                side_action[start_idx + 1] = 0.1
-                up_action[start_idx + 2] = 0.1
-                rot_action[start_idx + 3] = 0.1
+                forward_action[start_idx] = 0.02
+                side_action[start_idx + 1] = 0.02
+                up_action[start_idx + 2] = 0.02
+                rot_action[start_idx + 3] = 0.02
             elif controller_mode == "absolute_pose":
                 for act in [zero_action, forward_action, side_action, up_action, rot_action]:
                     act[start_idx : start_idx + 3] = init_eef_pos.clone()
diff --git a/tests/test_data_collection.py b/tests/test_data_collection.py
index 30625b3b1..fac7532c2 100644
--- a/tests/test_data_collection.py
+++ b/tests/test_data_collection.py
@@ -136,5 +136,5 @@ def test_data_collect_and_playback():
         n_render_iterations=1,
         only_successes=False,
     )
-    env.playback_dataset(record=True)
+    env.playback_dataset(record_data=True)
     env.save_data()
diff --git a/tests/test_robot_states_flatcache.py b/tests/test_robot_states_flatcache.py
index 3de592249..522f801af 100644
--- a/tests/test_robot_states_flatcache.py
+++ b/tests/test_robot_states_flatcache.py
@@ -63,8 +63,8 @@ def camera_pose_test(flatcache):
         relative_pose_transform(sensor_world_pos, sensor_world_ori, robot_world_pos, robot_world_ori)
     )
 
-    sensor_world_pos_gt = th.tensor([150.5187, 149.8295, 101.0960])
-    sensor_world_ori_gt = th.tensor([0.0199, -0.1330, 0.9892, -0.0580])
+    sensor_world_pos_gt = th.tensor([150.5134, 149.8278, 101.0816])
+    sensor_world_ori_gt = th.tensor([0.0176, -0.1205, 0.9910, -0.0549])
 
     assert th.allclose(sensor_world_pos, sensor_world_pos_gt, atol=1e-3)
     assert quaternions_close(sensor_world_ori, sensor_world_ori_gt, atol=1e-3)
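
# Illustrative sketch (not part of the patch): the orientation ground truth
# above is checked with quaternions_close rather than plain th.allclose. A
# minimal version of such a check, assuming the usual double-cover rule that
# q and -q encode the same rotation (the helper's actual implementation in the
# test utilities may differ):
import torch as th

def quaternions_close(q1, q2, atol=1e-3):
    return th.allclose(q1, q2, atol=atol) or th.allclose(q1, -q2, atol=atol)
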