diff --git a/docs/assets/robots/A1.png b/docs/assets/robots/A1.png index 856daa0cb..19d0c55bc 100644 Binary files a/docs/assets/robots/A1.png and b/docs/assets/robots/A1.png differ diff --git a/docs/assets/robots/R1.png b/docs/assets/robots/R1.png index 8733d8836..29cf0e549 100644 Binary files a/docs/assets/robots/R1.png and b/docs/assets/robots/R1.png differ diff --git a/docs/assets/robots/Stretch.png b/docs/assets/robots/Stretch.png index 79553b17c..192f116f4 100644 Binary files a/docs/assets/robots/Stretch.png and b/docs/assets/robots/Stretch.png differ diff --git a/omnigibson/envs/data_wrapper.py b/omnigibson/envs/data_wrapper.py index 8e3e19557..e4d2a37d3 100644 --- a/omnigibson/envs/data_wrapper.py +++ b/omnigibson/envs/data_wrapper.py @@ -5,6 +5,7 @@ from pathlib import Path import h5py +import imageio import torch as th import omnigibson as og @@ -60,6 +61,8 @@ def __init__(self, env, output_path, only_successes=True): self.add_metadata(group=data_grp, name="config", data=config) self.add_metadata(group=data_grp, name="scene_file", data=scene_file) + self.is_recording = True + # Run super super().__init__(env=env) @@ -86,14 +89,28 @@ def step(self, action): next_obs, reward, terminated, truncated, info = self.env.step(action) self.step_count += 1 + self._record_step_trajectory(action, next_obs, reward, terminated, truncated, info) + + return next_obs, reward, terminated, truncated, info + + def _record_step_trajectory(self, action, obs, reward, terminated, truncated, info): + """ + Record the current step data to the trajectory history + + Args: + action (th.Tensor): action deployed resulting in @obs + obs (dict): state, i.e. observation + reward (float): reward, i.e. reward at this current timestep + terminated (bool): terminated, i.e. whether this episode ended due to a failure or success + truncated (bool): truncated, i.e. whether this episode ended due to a time limit etc. + info (dict): info, i.e. dictionary with any useful information + """ # Aggregate step data - step_data = self._parse_step_data(action, next_obs, reward, terminated, truncated, info) + step_data = self._parse_step_data(action, obs, reward, terminated, truncated, info) # Update obs and traj history self.current_traj_history.append(step_data) - self.current_obs = next_obs - - return next_obs, reward, terminated, truncated, info + self.current_obs = obs def _parse_step_data(self, action, obs, reward, terminated, truncated, info): """ @@ -188,13 +205,22 @@ def process_traj_to_hdf5(self, traj_data, traj_grp_name, nested_keys=("obs",)): return traj_grp + @property + def should_save_current_episode(self): + """ + Returns: + bool: Whether the current episode should be saved or discarded + """ + # Only save successful demos and if actually recording + success = self.env.task.success or not self.only_successes + return success and self.hdf5_file is not None + def flush_current_traj(self): """ Flush current trajectory data """ # Only save successful demos and if actually recording - success = self.env.task.success or not self.only_successes - if success and self.hdf5_file is not None: + if self.should_save_current_episode: traj_grp_name = f"demo_{self.traj_count}" traj_grp = self.process_traj_to_hdf5(self.current_traj_history, traj_grp_name, nested_keys=["obs"]) self.traj_count += 1 @@ -252,7 +278,9 @@ class DataCollectionWrapper(DataWrapper): dataset! 
""" - def __init__(self, env, output_path, viewport_camera_path="/World/viewer_camera", only_successes=True): + def __init__( + self, env, output_path, viewport_camera_path="/World/viewer_camera", only_successes=True, use_vr=False + ): """ Args: env (Environment): The environment to wrap @@ -260,6 +288,7 @@ def __init__(self, env, output_path, viewport_camera_path="/World/viewer_camera" viewport_camera_path (str): prim path to the camera to use when rendering the main viewport during data collection only_successes (bool): Whether to only save successful episodes + use_vr (bool): Whether to use VR headset for data collection """ # Store additional variables needed for optimized data collection @@ -270,6 +299,9 @@ def __init__(self, env, output_path, viewport_camera_path="/World/viewer_camera" # the given simulator step. See add_transition_info() for more info self.current_transitions = dict() + self._is_recording = True + self.use_vr = use_vr + # Add callbacks on import / remove objects and systems og.sim.add_callback_on_system_init( name="data_collection", callback=lambda system: self.add_transition_info(obj=system, add=True) @@ -290,6 +322,18 @@ def __init__(self, env, output_path, viewport_camera_path="/World/viewer_camera" # Configure the simulator to optimize for data collection self._optimize_sim_for_data_collection(viewport_camera_path=viewport_camera_path) + @property + def is_recording(self): + return self._is_recording + + @is_recording.setter + def is_recording(self, value: bool): + self._is_recording = value + + def _record_step_trajectory(self, action, obs, reward, terminated, truncated, info): + if self.is_recording: + super()._record_step_trajectory(action, obs, reward, terminated, truncated, info) + def _optimize_sim_for_data_collection(self, viewport_camera_path): """ Configures the simulator to optimize for data collection @@ -310,12 +354,14 @@ def _optimize_sim_for_data_collection(self, viewport_camera_path): # toggling these settings to be True -> False -> True # Only setting it to True once will actually freeze the GUI for some reason! 
         if not gm.HEADLESS:
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", False)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", False)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True)
+            # Async rendering does not work in VR mode
+            if not self.use_vr:
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", False)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", False)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True)

         # Disable mouse grabbing since we're only using the UI passively
         lazy.carb.settings.get_settings().set_bool("/physics/mouseInteractionEnabled", False)
@@ -384,6 +430,11 @@ def flush_current_traj(self):
         self.max_state_size = 0
         self.current_transitions = dict()

+    @property
+    def should_save_current_episode(self):
+        # In addition to default conditions, we only save the current episode if we are actually recording
+        return super().should_save_current_episode and self.is_recording
+
     def add_transition_info(self, obj, add=True):
         """
         Adds transition info to the current sim step for specific object @obj.
@@ -470,6 +521,10 @@ def create_from_hdf5(
         if config["task"]["type"] == "BehaviorTask":
             config["task"]["online_object_sampling"] = False

+        # Because we're loading directly from the cached scene file, we skip adding any additional objects from the
+        # config, since they are already included in the cached scene file
+        config["objects"] = []
+
         # Set observation modalities and update sensor config
         for robot_cfg in config["robots"]:
             robot_cfg["obs_modalities"] = robot_obs_modalities
@@ -523,7 +578,7 @@ def _parse_step_data(self, action, obs, reward, terminated, truncated, info):
         step_data["truncated"] = truncated
         return step_data

-    def playback_episode(self, episode_id, record=True):
+    def playback_episode(self, episode_id, record=True, video_path=None, video_writer=None):
        """
        Playback episode @episode_id, and optionally record observation data if @record is True

        Args:
            episode_id (int): Episode to playback. This should be a valid demo ID number from the inputted collected data hdf5 file
            record (bool): Whether to record data during playback or not
+            video_path (None or str): If specified, path to write the playback video to
+            video_writer (None or imageio Writer): If specified, an imageio video writer object to use for writing the video (can be specified in place of @video_path)
        """
+        using_external_writer = video_writer is not None
+        if video_writer is None and video_path is not None:
+            video_writer = imageio.get_writer(video_path, fps=30)
+
        data_grp = self.input_hdf5["data"]
        assert f"demo_{episode_id}" in data_grp, f"No valid episode with ID {episode_id} found!"
        traj_grp = data_grp[f"demo_{episode_id}"]
@@ -597,17 +658,33 @@
                 )
                 self.current_traj_history.append(step_data)

+            # If writing video, save the current frame
+            if video_writer is not None:
+                video_writer.append_data(og.sim.viewer_camera.get_obs()[0]["rgb"][:, :, :3].numpy())
+
             self.step_count += 1

         if record:
             self.flush_current_traj()

-    def playback_dataset(self, record=True):
+        # If we weren't using an external writer but we're still writing a video, close the writer
+        if video_writer is not None and not using_external_writer:
+            video_writer.close()
+
+    def playback_dataset(self, record=True, video_path=None, video_writer=None):
         """
         Playback all episodes from the input HDF5 file, and optionally record observation data if @record is True

         Args:
             record (bool): Whether to record data during playback or not
+            video_path (None or str): If specified, path to write the playback video to
+            video_writer (None or imageio Writer): If specified, an imageio video writer object to use for writing the video (can be specified in place of @video_path)
         """
+        if video_writer is None and video_path is not None:
+            video_writer = imageio.get_writer(video_path, fps=30)
         for episode_id in range(self.input_hdf5["data"].attrs["n_episodes"]):
-            self.playback_episode(episode_id=episode_id, record=record)
+            self.playback_episode(episode_id=episode_id, record=record, video_path=None, video_writer=video_writer)
+
+        # Close the video writer at the end if created
+        if video_writer is not None:
+            video_writer.close()
diff --git a/omnigibson/envs/env_base.py b/omnigibson/envs/env_base.py
index 6b1796160..2e578d60d 100644
--- a/omnigibson/envs/env_base.py
+++ b/omnigibson/envs/env_base.py
@@ -717,7 +717,7 @@ def reset(self, get_obs=True, **kwargs):
                     log.error(f"Expected: {exp_obs[k]}")
                     log.error(f"Received: {real_obs[k]}")

-                raise ValueError("Observation space does not match returned observations!")
+                # raise ValueError("Observation space does not match returned observations!")

         return obs, {}
diff --git a/omnigibson/examples/teleoperation/vr_robot_control_demo.py b/omnigibson/examples/teleoperation/vr_robot_control_demo.py
new file mode 100644
index 000000000..509720abe
--- /dev/null
+++ b/omnigibson/examples/teleoperation/vr_robot_control_demo.py
@@ -0,0 +1,104 @@
+"""
+Example script for interacting with OmniGibson scenes with VR and an R1 robot.
+"""
+
+import torch as th
+
+import omnigibson as og
+from omnigibson.macros import gm
+from omnigibson.utils.teleop_utils import OVXRSystem
+
+gm.ENABLE_OBJECT_STATES = False
+gm.ENABLE_TRANSITION_RULES = False
+gm.ENABLE_FLATCACHE = True
+gm.GUI_VIEWPORT_ONLY = True
+
+# import torch._dynamo
+# torch._dynamo.config.suppress_errors = True
+
+
+def main():
+    """
+    Spawn an R1 robot in Rs_int; users can navigate around and interact with the scene using VR.
+ """ + # Create the config for generating the environment we want + scene_cfg = {"type": "InteractiveTraversableScene", "scene_model": "Rs_int"} + robot0_cfg = { + "type": "R1", + "obs_modalities": ["rgb"], + "controller_config": { + "arm_left": { + "name": "InverseKinematicsController", + "mode": "absolute_pose", + "command_input_limits": None, + "command_output_limits": None, + }, + "arm_right": { + "name": "InverseKinematicsController", + "mode": "absolute_pose", + "command_input_limits": None, + "command_output_limits": None, + }, + "gripper_left": {"name": "MultiFingerGripperController", "command_input_limits": "default"}, + "gripper_right": {"name": "MultiFingerGripperController", "command_input_limits": "default"}, + }, + "action_normalize": False, + "reset_joint_pos": [ + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 1.0, + -1.8000, + -0.8000, + 0.0000, + -0.0068, + 0.0059, + 2.6054, + 2.5988, + -1.4515, + -1.4478, + -0.0065, + 0.0052, + 1.5670, + -1.5635, + -1.1428, + 1.1610, + 0.0087, + 0.0087, + 0.0087, + 0.0087, + ], + } + cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) + + # Create the environment + env = og.Environment(configs=cfg) + env.reset() + # start vrsys + vrsys = OVXRSystem( + robot=env.robots[0], + show_control_marker=True, + system="SteamVR", + eef_tracking_mode="controller", + align_anchor_to="camera", + # roll, pitch, yaw + view_angle_limits=[180, 30, 30], + ) + vrsys.start() + + for _ in range(3000): + # update the VR system + vrsys.update() + # get the action from the VR system and step the environment + env.step(vrsys.get_robot_teleop_action()) + + print("Cleaning up...") + vrsys.stop() + og.clear() + + +if __name__ == "__main__": + main() diff --git a/omnigibson/examples/teleoperation/vr_scene_tour_demo.py b/omnigibson/examples/teleoperation/vr_scene_tour_demo.py new file mode 100644 index 000000000..74ed01642 --- /dev/null +++ b/omnigibson/examples/teleoperation/vr_scene_tour_demo.py @@ -0,0 +1,63 @@ +""" +Example script for interacting with OmniGibson scenes with VR. +""" + +import torch as th +import torch._dynamo + +import omnigibson as og +from omnigibson.macros import gm +from omnigibson.utils.asset_utils import get_available_og_scenes +from omnigibson.utils.teleop_utils import OVXRSystem +from omnigibson.utils.ui_utils import choose_from_options + +torch._dynamo.config.suppress_errors = True + +gm.ENABLE_FLATCACHE = True +gm.ENABLE_OBJECT_STATES = False +gm.ENABLE_TRANSITION_RULES = False + + +def main(): + """ + Users can navigate around and interact with a selected scene using VR. 
+ """ + + # Choose the scene model to load + scenes = get_available_og_scenes() + scene_model = choose_from_options(options=scenes, name="scene model") + + # Create the config for generating the environment we want + scene_cfg = {"type": "InteractiveTraversableScene", "scene_model": scene_model} + cfg = dict(scene=scene_cfg) + + # Create the environment + env = og.Environment(configs=cfg) + env.reset() + # start vrsys + vrsys = OVXRSystem( + robot=None, + show_control_marker=False, + system="SteamVR", + eef_tracking_mode="disabled", + align_anchor_to="touchpad", + ) + vrsys.start() + # set headset position to be 1m above ground and facing +x + vrsys.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device( + vrsys.og2xr(th.tensor([0.0, 0.0, 1.0]), th.tensor([-0.5, 0.5, 0.5, -0.5])).numpy(), vrsys.hmd + ) + + # main simulation loop + while True: + # step the VR system to get the latest data from VR runtime + vrsys.update(optimized_for_tour=True) + og.sim.render() + + # Shut down the environment cleanly at the end + vrsys.stop() + og.clear() + + +if __name__ == "__main__": + main() diff --git a/omnigibson/examples/teleoperation/vr_simple_demo.py b/omnigibson/examples/teleoperation/vr_simple_demo.py deleted file mode 100644 index fa4cad1d8..000000000 --- a/omnigibson/examples/teleoperation/vr_simple_demo.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Example script for interacting with OmniGibson scenes with VR and BehaviorRobot. -""" - -import omnigibson as og -from omnigibson.utils.teleop_utils import OVXRSystem - - -def main(): - """ - Spawn a BehaviorRobot in Rs_int and users can navigate around and interact with the scene using VR. - """ - # Create the config for generating the environment we want - scene_cfg = {"type": "Scene"} # "InteractiveTraversableScene", "scene_model": "Rs_int"} - robot0_cfg = { - "type": "Tiago", - "controller_config": { - "gripper_left": {"command_input_limits": "default"}, - "gripper_right": {"command_input_limits": "default"}, - }, - } - cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) - - # Create the environment - env = og.Environment(configs=cfg) - env.reset() - # start vrsys - vrsys = OVXRSystem( - robot=env.robots[0], show_control_marker=False, system="SteamVR", align_anchor_to_robot_base=True - ) - vrsys.start() - # set headset position to be 1m above ground and facing +x - vrsys.set_initial_transform(pos=[0, 0, 1], orn=[0, 0, 0, 1]) - - # main simulation loop - for _ in range(10000): - # step the VR system to get the latest data from VR runtime - vrsys.update() - # generate robot action and step the environment - action = vrsys.teleop_data_to_action() - env.step(action) - - # Shut down the environment cleanly at the end - vrsys.stop() - og.clear() - - -if __name__ == "__main__": - main() diff --git a/omnigibson/install.py b/omnigibson/install.py index eec12a267..8156fe80b 100644 --- a/omnigibson/install.py +++ b/omnigibson/install.py @@ -14,32 +14,32 @@ # List of NVIDIA PyPI packages needed for OmniGibson ISAAC_SIM_PACKAGES = [ - "omniverse_kit-106.0.1.126909", - "isaacsim_kernel-4.1.0.0", - "isaacsim_app-4.1.0.0", - "isaacsim_core-4.1.0.0", - "isaacsim_gui-4.1.0.0", - "isaacsim_utils-4.1.0.0", - "isaacsim_storage-4.1.0.0", - "isaacsim_asset-4.1.0.0", - "isaacsim_sensor-4.1.0.0", - "isaacsim_robot_motion-4.1.0.0", - "isaacsim_robot-4.1.0.0", - "isaacsim_benchmark-4.1.0.0", - "isaacsim_code_editor-4.1.0.0", - "isaacsim_ros1-4.1.0.0", - "isaacsim_cortex-4.1.0.0", - "isaacsim_example-4.1.0.0", - "isaacsim_replicator-4.1.0.0", - 
"isaacsim_rl-4.1.0.0", - "isaacsim_robot_setup-4.1.0.0", - "isaacsim_ros2-4.1.0.0", - "isaacsim_template-4.1.0.0", - "isaacsim_test-4.1.0.0", - "isaacsim-4.1.0.0", - "isaacsim_extscache_physics-4.1.0.0", - "isaacsim_extscache_kit-4.1.0.0", - "isaacsim_extscache_kit_sdk-4.1.0.0", + "omniverse_kit-106.1.0.140981", + "isaacsim_kernel-4.2.0.2", + "isaacsim_app-4.2.0.2", + "isaacsim_core-4.2.0.2", + "isaacsim_gui-4.2.0.2", + "isaacsim_utils-4.2.0.2", + "isaacsim_storage-4.2.0.2", + "isaacsim_asset-4.2.0.2", + "isaacsim_sensor-4.2.0.2", + "isaacsim_robot_motion-4.2.0.2", + "isaacsim_robot-4.2.0.2", + "isaacsim_benchmark-4.2.0.2", + "isaacsim_code_editor-4.2.0.2", + "isaacsim_ros1-4.2.0.2", + "isaacsim_cortex-4.2.0.2", + "isaacsim_example-4.2.0.2", + "isaacsim_replicator-4.2.0.2", + "isaacsim_rl-4.2.0.2", + "isaacsim_robot_setup-4.2.0.2", + "isaacsim_ros2-4.2.0.2", + "isaacsim_template-4.2.0.2", + "isaacsim_test-4.2.0.2", + "isaacsim-4.2.0.2", + "isaacsim_extscache_physics-4.2.0.2", + "isaacsim_extscache_kit-4.2.0.2", + "isaacsim_extscache_kit_sdk-4.2.0.2", ] BASE_URL = "https://pypi.nvidia.com" @@ -216,7 +216,7 @@ def _launcher_based_install(isaac_sim_path: Optional[Path]): isaac_version_str = version_content.split("-")[0] isaac_version_tuple = tuple(map(int, isaac_version_str.split(".")[:3])) - if isaac_version_tuple not in ((4, 0, 0), (4, 1, 0)): + if isaac_version_tuple not in ((4, 0, 0), (4, 1, 0), (4, 2, 0)): click.echo(f"Isaac Sim version {isaac_version_str} is not supported by OmniGibson.") return False diff --git a/omnigibson/object_states/factory.py b/omnigibson/object_states/factory.py index c4d26c88c..9bbe27134 100644 --- a/omnigibson/object_states/factory.py +++ b/omnigibson/object_states/factory.py @@ -65,6 +65,12 @@ ] ) +_MIST_STATE_SET = frozenset( + [ + ParticleApplier, + ] +) + _TEXTURE_CHANGE_STATE_SET = frozenset( [ Frozen, @@ -84,7 +90,7 @@ ] ) -_VISUAL_STATE_SET = frozenset(_FIRE_STATE_SET | _STEAM_STATE_SET | _TEXTURE_CHANGE_STATE_SET) +_VISUAL_STATE_SET = frozenset(_FIRE_STATE_SET | _STEAM_STATE_SET | _TEXTURE_CHANGE_STATE_SET | _MIST_STATE_SET) _TEXTURE_CHANGE_PRIORITY = { Frozen: 4, @@ -107,6 +113,10 @@ def get_steam_states(): return _STEAM_STATE_SET +def get_mist_states(): + return _MIST_STATE_SET + + def get_texture_change_states(): return _TEXTURE_CHANGE_STATE_SET diff --git a/omnigibson/object_states/particle_modifier.py b/omnigibson/object_states/particle_modifier.py index a7f655eab..f1342548d 100644 --- a/omnigibson/object_states/particle_modifier.py +++ b/omnigibson/object_states/particle_modifier.py @@ -1012,7 +1012,6 @@ def __init__( self.projection_system = None self.projection_system_prim = None - self.projection_emitter = None # Run super super().__init__(obj=obj, method=method, conditions=conditions, projection_mesh_params=projection_mesh_params) @@ -1026,79 +1025,6 @@ def _initialize(self): # This will initialize the system if it's not initialized already. system = self.obj.scene.get_system(system_name) - if self.visualize: - assert self._projection_mesh_params["type"] in { - "Cylinder", - "Cone", - }, f"{self.__class__.__name__} visualization only supports Cylinder and Cone types!" 
- radius, height = ( - th.mean(self._projection_mesh_params["extents"][:2]) / 2.0, - self._projection_mesh_params["extents"][2], - ) - # Generate the projection visualization - particle_radius = ( - m.VISUAL_PARTICLE_PROJECTION_PARTICLE_RADIUS - if self.obj.scene.is_visual_particle_system(system_name=system.name) - else system.particle_radius - ) - - name_prefix = f"{self.obj.name}_{self.__class__.__name__}" - # Create the projection visualization if it doesn't already exist, otherwise we reference it directly - projection_name = f"{name_prefix}_projection_visualization" - projection_path = f"/OmniGraph/{projection_name}" - projection_visualization_path = f"{self.link.prim_path}/projection_visualization" - if lazy.omni.isaac.core.utils.prims.is_prim_path_valid(projection_path): - self.projection_system = lazy.omni.isaac.core.utils.prims.get_prim_at_path(projection_path) - self.projection_emitter = lazy.omni.isaac.core.utils.prims.get_prim_at_path( - f"{projection_path}/emitter" - ) - else: - self.projection_system, self.projection_emitter = create_projection_visualization( - scene=self.obj.scene, - prim_path=projection_visualization_path, - shape=self._projection_mesh_params["type"], - projection_name=projection_name, - projection_radius=radius, - projection_height=height, - particle_radius=particle_radius, - parent_scale=self.link.scale, - material=system.material, - ) - relative_projection_system_path = absolute_prim_path_to_scene_relative( - self.obj.scene, self.projection_system.GetPrimPath().pathString - ) - self.projection_system_prim = BasePrim( - relative_prim_path=relative_projection_system_path, name=projection_name - ) - self.projection_system_prim.load(self.obj.scene) - - # Create the visual geom instance referencing the generated source mesh prim, and then hide it - relative_projection_source_path = absolute_prim_path_to_scene_relative( - self.obj.scene, projection_visualization_path - ) - self.projection_source_sphere = VisualGeomPrim( - relative_prim_path=relative_projection_source_path, name=f"{name_prefix}_projection_source_sphere" - ) - self.projection_source_sphere.load(self.obj.scene) - self.projection_source_sphere.initialize() - self.projection_source_sphere.visible = False - # Rotate by 90 degrees in y-axis so that the projection visualization aligns with the projection mesh - self.projection_source_sphere.set_position_orientation( - orientation=T.euler2quat(th.tensor([0, math.pi / 2, 0], dtype=th.float32)), frame="parent" - ) - - # Make sure the meta mesh is aligned with the meta link if visualizing - # This corresponds to checking (a) position of tip of projection mesh should align with origin of - # metalink, and (b) zero relative orientation between the metalink and the projection mesh - local_pos, local_quat = self.projection_mesh.get_position_orientation(frame="parent") - assert th.all( - th.isclose(local_pos + th.tensor([0, 0, height / 2.0]), th.zeros_like(local_pos)) - ), "Projection mesh tip should align with metalink position!" - local_euler = T.quat2euler(local_quat) - assert th.all( - th.isclose(local_euler, th.zeros_like(local_euler)) - ), "Projection mesh orientation should align with metalink orientation!" 
- # Store which method to use for sampling particle locations if self._sample_with_raycast: if self.method == ParticleModifyMethod.PROJECTION: @@ -1193,16 +1119,6 @@ def _compute_particle_spawn_information(self, system): ) self._in_mesh_local_particle_directions = directions / th.norm(directions, dim=-1).reshape(-1, 1) - def _update(self): - # If we're about to check for modification, update whether it the visualization should be active or not - if self.visualize and self._current_step == 0: - # Only one system in our conditions, so next(iter()) suffices - is_active = all(condition(self.obj) for condition in next(iter(self.conditions.values()))) - self.projection_emitter.GetProperty("inputs:active").Set(is_active) - - # Run super - super()._update() - def remove(self): # We need to remove the projection visualization if it exists if self.projection_system_prim is not None: @@ -1489,8 +1405,8 @@ def systems_to_check(self): @property def projection_is_active(self): - # Only active if the projection mesh is enabled - return self.projection_emitter.GetProperty("inputs:active").Get() + # Only one system in our conditions, so next(iter()) suffices + return all(condition(self.obj) for condition in next(iter(self.conditions.values()))) @classproperty def metalink_prefix(cls): diff --git a/omnigibson/objects/object_base.py b/omnigibson/objects/object_base.py index a4101b42f..43e361a56 100644 --- a/omnigibson/objects/object_base.py +++ b/omnigibson/objects/object_base.py @@ -228,6 +228,9 @@ def _initialize(self): self._highlight_cached_values = dict() for material in self.materials: + if material.is_glass: + # Skip glass materials + continue self._highlight_cached_values[material] = { "enable_emission": material.enable_emission, "emissive_color": material.emissive_color, @@ -334,6 +337,9 @@ def highlighted(self, enabled): return for material in self.materials: + if material.is_glass: + # Skip glass materials + continue if enabled: # Store values before swapping self._highlight_cached_values[material] = { diff --git a/omnigibson/objects/stateful_object.py b/omnigibson/objects/stateful_object.py index a1bd5fc1e..650f23014 100644 --- a/omnigibson/objects/stateful_object.py +++ b/omnigibson/objects/stateful_object.py @@ -12,6 +12,7 @@ from omnigibson.object_states.factory import ( get_default_states, get_fire_states, + get_mist_states, get_requirements_for_ability, get_state_name, get_states_by_dependency_order, @@ -24,7 +25,7 @@ from omnigibson.object_states.heat_source_or_sink import HeatSourceOrSink from omnigibson.object_states.object_state_base import REGISTERED_OBJECT_STATES from omnigibson.object_states.on_fire import OnFire -from omnigibson.object_states.particle_modifier import ParticleRemover +from omnigibson.object_states.particle_modifier import ParticleApplier, ParticleRemover from omnigibson.objects.object_base import BaseObject from omnigibson.renderer_settings.renderer_settings import RendererSettings from omnigibson.utils.constants import EmitterType, PrimType @@ -167,6 +168,9 @@ def _initialize(self): if len(states_set & get_fire_states()) > 0: self._create_emitter_apis(EmitterType.FIRE) + if len(states_set & get_mist_states()) > 0: + self._create_emitter_apis(EmitterType.MIST) + def add_state(self, state): """ Adds state @state with name @name to self.states. 
@@ -336,8 +340,20 @@ def _create_emitter_apis(self, emitter_type): emitter_config["gravity"] = (0, 0, -50.0) emitter_config["constantMask"] = 10.0 emitter_config["attenuation"] = 1.5 + elif emitter_type == EmitterType.MIST: + link = self.states[ParticleApplier].link + emitter_config["name"] = "flowEmitterMesh" + emitter_config["type"] = "FlowEmitterMesh" + emitter_config["position"] = (0.0, 0.0, 0.0) + emitter_config["fuel"] = 1.0 + emitter_config["coupleRateFuel"] = 0.5 + emitter_config["buoyancyPerTemp"] = 0.05 + emitter_config["burnPerTemp"] = 0.5 + emitter_config["gravity"] = (0, 0, 0.0) + emitter_config["constantMask"] = 10.0 + emitter_config["attenuation"] = 1.5 else: - raise ValueError("Currently, only EmitterTypes FIRE and STEAM are supported!") + raise ValueError("Currently, only EmitterTypes FIRE, STEAM, and MIST are supported!") # Define prim paths. # The flow system is created under the root link so that it automatically updates its pose as the object moves @@ -465,6 +481,8 @@ def update_visuals(self): emitter_enabled[EmitterType.STEAM] |= state.get_value() if state_type in get_fire_states(): emitter_enabled[EmitterType.FIRE] |= state.get_value() + if state_type in get_mist_states(): + emitter_enabled[EmitterType.MIST] |= state.projection_is_active for emitter_type in emitter_enabled: self.set_emitter_enabled(emitter_type, emitter_enabled[emitter_type]) diff --git a/omnigibson/omnigibson_4_2_0.kit b/omnigibson/omnigibson_4_2_0.kit new file mode 100644 index 000000000..517024b4e --- /dev/null +++ b/omnigibson/omnigibson_4_2_0.kit @@ -0,0 +1,367 @@ +[package] +title = "OmniGibson" +description = "A platform for accelerating Embodied AI research" +version = "4.2.0" + +# That makes it browsable in UI with "experience" filter +keywords = ["experience", "app", "usd"] + +[dependencies] +# Isaac Sim extensions +"omni.isaac.cloner" = {} +"omni.isaac.core" = {} +## "omni.isaac.core_archive" = {} +"omni.isaac.core_nodes" = {} +"omni.isaac.cortex" = {} +"omni.isaac.cortex.sample_behaviors" = {} +"omni.isaac.debug_draw" = {} +"omni.isaac.dynamic_control" = {} +"omni.isaac.franka" = {} +"omni.isaac.kit" = {} +"omni.isaac.lula" = {} +"omni.isaac.manipulators" = {} +## "omni.isaac.ml_archive" = {} +"omni.isaac.motion_generation" = {} +"omni.isaac.menu" = {} +"omni.isaac.nucleus" = {} +"omni.isaac.occupancy_map" = {} +"omni.isaac.quadruped" = {} +"omni.isaac.range_sensor" = {} +"omni.isaac.scene_blox" = {} +"omni.isaac.sensor" = {} +"omni.isaac.surface_gripper" = {} +"omni.isaac.universal_robots" = {} +"omni.isaac.utils" = {} +"omni.isaac.wheeled_robots" = {} +## "omni.kit.property.isaac" = {} +## "omni.pip.cloud" = {} +## "omni.pip.compute" = {} +"omni.replicator.isaac" = {} + +# Kit extensions +#"omni.activity.profiler" = {} +#"omni.activity.pump" = {} +"omni.anim.graph.schema" = {} +"omni.anim.navigation.schema" = {} +"omni.usd.schema.omniscripting" = {} +"omni.graph.bundle.action" = {} +"omni.graph.visualization.nodes" = {} +"omni.graph.window.action" = {} +"omni.graph.window.generic" = {} +"omni.hydra.engine.stats" = {} +"omni.importer.mjcf" = {} +"omni.importer.urdf" = {} +"omni.kit.collaboration.channel_manager" = {} +"omni.kit.context_menu" = {} +"omni.kit.hotkeys.window" = {} +"omni.kit.loop-isaac" = {} +"omni.kit.manipulator.prim" = {} +"omni.kit.manipulator.transform" = {} +"omni.kit.menu.common" = {} +"omni.kit.menu.create" = {} +"omni.kit.menu.edit" = {} +"omni.kit.menu.file" = {} +"omni.kit.menu.stage" = {} +"omni.kit.menu.utils" = {} +"omni.kit.primitive.mesh" = {} 
+"omni.kit.profiler.window" = {} +"omni.kit.property.bundle" = {} +"omni.kit.property.layer" = {} +"omni.kit.renderer.core" = {} +"omni.kit.selection" = {} +"omni.kit.stage_column.payload" = {} +"omni.kit.stage_column.variant" = {} +"omni.kit.stage_templates" = {} +"omni.kit.telemetry" = {} +"omni.kit.uiapp" = {} +"omni.kit.viewport.bundle" = {} +"omni.kit.viewport.menubar.lighting" = {} +# "omni.kit.viewport.ready" = {} +"omni.kit.viewport.rtx" = {} +"omni.kit.widget.cache_indicator" = {} +"omni.kit.widget.extended_searchfield" = {} +"omni.kit.widget.filebrowser" = {} +"omni.kit.widget.layers" = {} +"omni.kit.widget.live" = {} +"omni.kit.widget.timeline" = {} +"omni.kit.window.commands" = {} +"omni.kit.window.console" = {} +"omni.kit.window.content_browser" = {} +"omni.kit.window.cursor" = {} +"omni.kit.window.extensions" = {} +"omni.kit.window.file" = {} +"omni.kit.window.filepicker" = {} +"omni.kit.window.property" = {} +"omni.kit.window.script_editor" = {} +"omni.kit.window.stage" = {} +"omni.kit.window.stats" = {order = 1000} +"omni.kit.window.status_bar" = {} +"omni.kit.window.title" = {} +"omni.kit.window.toolbar" = {} +"omni.physx.bundle" = {} +# "omni.physx.fabric" = {} +"omni.physx.tensors" = {} +"omni.replicator.core" = {} +"omni.replicator.replicator_yaml" = {} +"omni.resourcemonitor" = {} +"omni.rtx.settings.core" = {} +"omni.stats" = {} +"omni.syntheticdata" = {} +"omni.usd.schema.scene.visualization" = {} +"omni.warp" = {} +"semantics.schema.editor" = {} +"semantics.schema.property" = {} +"omni.kit.xr.profile.vr" = { version = "106.0.71" } + +[settings] +renderer.active = "rtx" +exts."omni.kit.viewport.menubar.camera".expand = true # Expand the extra-camera settings by default +exts."omni.kit.window.file".useNewFilePicker = true +exts."omni.kit.tool.asset_importer".useNewFilePicker = true +exts."omni.kit.tool.collect".useNewFilePicker = true +exts."omni.kit.widget.layers".useNewFilePicker = true +exts."omni.kit.renderer.core".imgui.enableMips = true +exts."omni.kit.widget.cloud_share".require_access_code = false +exts."omni.kit.pipapi".installCheckIgnoreVersion = true +exts."omni.kit.viewport.window".startup.windowName="Viewport" # Rename from Viewport Next +exts."omni.kit.menu.utils".logDeprecated = false + +# app.content.emptyStageOnStart = false +app.file.ignoreUnsavedOnExit = true # prevents save dialog when exiting + +# deprecate support for old kit.ui.menu +app.menu.legacy_mode = false +# use omni.ui.Menu for the MenuBar +app.menu.compatibility_mode = false +# Setting the port for the embedded http server +exts."omni.services.transport.server.http".port = 8211 + +# default viewport is fill +app.runLoops.rendering_0.fillResolution = false + +exts."omni.kit.test".includeTests = [ "*isaac*" ] + + +[settings.app.python] +# These disable the kit app from also printing out python output, which gets confusing +interceptSysStdOutput = false +logSysStdOutput = false + +[settings.app.settings] +persistent = false +dev_build = false +fabricDefaultStageFrameHistoryCount = 3 # needed for omni.syntheticdata TODO105 Still True? 
+ +[settings.app.window] +title = "OmniGibson" +hideUi = false +_iconSize = 256 +iconPath = "${omni.isaac.kit}/data/omni.isaac.sim.png" + +# width = 1700 +# height = 900 +# x = -1 +# y = -1 + +# Fonts +[setting.app.font] +file = "${fonts}/OpenSans-SemiBold.ttf" +size = 16 + +# [setting.app.runLoops] +# main.rateLimitEnabled = false +# main.rateLimitFrequency = 60 +# main.rateLimitUseBusyLoop = false +# rendering_0.rateLimitEnabled = false + +[settings.exts.'omni.kit.window.extensions'] +# List extensions here we want to show as featured when extension manager is opened +featuredExts = [] + + +[settings] +# MGPU is always on, you can turn it from the settings, and force this off to save even more resource if you +# only want to use a single GPU on your MGPU system +renderer.multiGpu.enabled = true +renderer.multiGpu.autoEnable = true +# This setting forces all GPUs to copy their render results to the main GPU. +# This legacy setting should not be needed anymore +app.gatherRenderResults = false +'rtx-transient'.resourcemanager.enableTextureStreaming = true +# app.hydra.aperture.conform = 4 # in 105.1 pixels are square by default +app.hydraEngine.waitIdle = false +rtx.newDenoiser.enabled = true + +# Enable Iray and pxr by setting this to "rtx,iray,pxr" +renderer.enabled = "rtx" + +physics.autoPopupSimulationOutputWindow=false + +### async rendering settings +omni.replicator.asyncRendering = false +app.asyncRendering = false +app.asyncRenderingLowLatency = false + +### Render thread settings +app.runLoops.main.rateLimitEnabled = false +app.runLoops.main.rateLimitFrequency = 120 +app.runLoops.main.rateLimitUsePrecisionSleep = true +app.runLoops.main.syncToPresent = false +app.runLoops.present.rateLimitFrequency = 60 +app.runLoops.present.rateLimitUsePrecisionSleep = true +app.runLoops.rendering_0.rateLimitFrequency = 120 +app.runLoops.rendering_0.rateLimitUsePrecisionSleep = true +app.runLoops.rendering_0.syncToPresent = false +app.runLoops.rendering_1.rateLimitFrequency = 120 +app.runLoops.rendering_1.rateLimitUsePrecisionSleep = true +app.runLoops.rendering_1.syncToPresent = false +app.runLoopsGlobal.syncToPresent = false +app.vsync = false +exts."omni.kit.renderer.core".present.enabled = false +exts."omni.kit.renderer.core".present.presentAfterRendering = false +persistent.app.viewport.defaults.tickRate = 120 +rtx-transient.dlssg.enabled = false + +app.audio.enabled = false + +privacy.externalBuild = true + +# Enable Vulkan +app.vulkan = true + +# Basic Kit App +################################ +app.versionFile = "${app}/../VERSION" +app.name = "Isaac-Sim" +app.version = "4.2.0" + +# hide NonToggleable Exts +exts."omni.kit.window.extensions".hideNonToggleableExts = true +exts."omni.kit.window.extensions".showFeatureOnly = false + +# Hang Detector +################################ +# app.hangDetector.enabled = false +# app.hangDetector.timeout = 120 + +# RTX Settings +############################### +[settings.rtx] +translucency.worldEps = 0.005 + +# Content Browser +############################### +[settings.exts."omni.kit.window.content_browser"] +enable_thumbnail_generation_images = false # temp fix to avoid leaking python processes + +# Extensions +############################### +[settings.exts."omni.kit.registry.nucleus"] +registries = [ + { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/106/shared" }, + { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" }, + { name 
= "kit/community", url = "https://dw290v42wisod.cloudfront.net/exts/kit/community" }, +] + +[settings.app.extensions] +skipPublishVerification = false +registryEnabled = true + +[settings.exts."omni.kit.window.modifier.titlebar"] +titleFormatString = " Isaac Sim {version:${app}/../SHORT_VERSION,font_color=0x909090,font_size=16} {separator} {file, board=true}" +showFileFullPath = true +icon.file = "${app}/../exts/omni.isaac.app.setup/data/nvidia-omniverse-isaacsim.ico" +icon.size = 256 +defaultFont.name = "Arial" +defaultFont.size = 16 +defaultFont.color = 0xD0D0D0 +separator.color = 0x00B976 +separator.width = 1 +windowBorder.color = 0x0F0F0F +windowBorder.width = 2 +colors.caption = 0x0F0F0F +colors.client = 0x0F0F0F +respondOnMouseUp = true +changeWindowRegion = true + + +# Register extension folder from this repo in kit +[settings.app.exts] +folders = ["${app}/../exts", "${app}/../extscache", "${app}/../extsPhysics", "${app}/../extsUser"] + +[settings.crashreporter.data] +experience = "Isaac Sim Python" + +# Isaac Sim Settings +############################### +[settings.app.renderer] +skipWhileMinimized = false +sleepMsOnFocus = 0 +sleepMsOutOfFocus = 0 +resolution.width=1280 +resolution.height=720 + +[settings.app.livestream] +proto = "ws" +allowResize = true +outDirectory = "${data}" + +# default camera position in meters +[settings.app.viewport] +defaultCamPos.x = 5 +defaultCamPos.y = 5 +defaultCamPos.z = 5 + +[settings.rtx] +raytracing.fractionalCutoutOpacity = false +hydra.enableSemanticSchema = true +mdltranslator.mdlDistilling = false +# descriptorSets=60000 +# reservedDescriptors=500000 +# sceneDb.maxInstances=1000000 +# Enable this for static scenes, improves visual quality +# directLighting.sampledLighting.enabled = true + +[settings.persistent] +app.file.recentFiles = [] +app.stage.upAxis = "Z" +app.stage.timeCodeRange = [0, 1000000] +app.stage.movePrimInPlace = false +app.stage.instanceableOnCreatingReference = false +app.stage.materialStrength = "weakerThanDescendants" + +app.transform.gizmoUseSRT = true +app.viewport.grid.scale = 1.0 +app.viewport.pickingMode = "kind:model.ALL" +app.viewport.camMoveVelocity = 0.05 # 5 m/s +app.viewport.gizmo.scale = 0.01 # scaled to meters +app.viewport.previewOnPeek = false +app.viewport.snapToSurface = false +app.viewport.displayOptions = 31887 # Disable Frame Rate and Resolution by default +app.window.uiStyle = "NvidiaDark" +app.primCreation.DefaultXformOpType = "Scale, Orient, Translate" +app.primCreation.DefaultXformOpOrder="xformOp:translate, xformOp:orient, xformOp:scale" +app.primCreation.typedDefaults.camera.clippingRange = [0.01, 10000000.0] +simulation.minFrameRate = 15 +simulation.defaultMetersPerUnit = 1.0 +omnigraph.updateToUsd = false +omnigraph.useSchemaPrims = true +omnigraph.disablePrimNodes = true +physics.updateToUsd = true +physics.updateVelocitiesToUsd = true +physics.useFastCache = false +physics.visualizationDisplayJoints = false +physics.visualizationSimulationOutput = false +omni.replicator.captureOnPlay = true +exts."omni.anim.navigation.core".navMesh.viewNavMesh = false +exts."omni.anim.navigation.core".navMesh.config.autoRebakeOnChanges = false +resourcemonitor.timeBetweenQueries = 100 + +renderer.startupMessageDisplayed = true # hides the IOMMU popup window + +# Make Detail panel visible by default +app.omniverse.content_browser.options_menu.show_details = true +app.omniverse.filepicker.options_menu.show_details = true + +[settings.ngx] +enabled=true # Enable this for DLSS diff --git 
a/omnigibson/prims/material_prim.py b/omnigibson/prims/material_prim.py
index 3098658a3..fcc414acc 100644
--- a/omnigibson/prims/material_prim.py
+++ b/omnigibson/prims/material_prim.py
@@ -7,7 +7,7 @@
 import omnigibson.lazy as lazy
 from omnigibson.prims.prim_base import BasePrim
 from omnigibson.utils.physx_utils import bind_material
-from omnigibson.utils.usd_utils import absolute_prim_path_to_scene_relative
+from omnigibson.utils.usd_utils import absolute_prim_path_to_scene_relative, get_sdf_value_type_name


 class MaterialPrim(BasePrim):
@@ -70,6 +70,7 @@ def __init__(
     ):
         # Other values that will be filled in at runtime
         self._shader = None
+        self._shader_node = None

         # Users of this material: should be a set of BaseObject and BaseSystem
         self._users = set()
@@ -148,6 +149,7 @@ def _post_load(self):

         # Generate shader reference
         self._shader = lazy.omni.usd.get_shader_from_material(self._prim)
+        self._shader_node = lazy.usd.mdl.RegistryUtils.GetShaderNodeForPrim(self._shader.GetPrim())

     def bind(self, target_prim_path):
         """
@@ -194,7 +196,7 @@ def shader_update_asset_paths_with_root_path(self, root_path, relative=False):
                 Otherwise, @root_path will be pre-appended to the original asset paths
         """

-        for inp_name in self.shader_input_names_by_type("SdfAssetPath"):
+        for inp_name in self.get_shader_input_names_by_type("SdfAssetPath", include_default=True):
             inp = self.get_input(inp_name)
             # If the input doesn't have any path, skip
             if inp is None:
@@ -220,7 +222,11 @@ def get_input(self, inp):
         Returns:
             any: value of the requested @inp
         """
-        return self._shader.GetInput(inp).Get()
+        non_default_inp = self._shader.GetInput(inp).Get()
+        if non_default_inp is not None:
+            return non_default_inp
+
+        return self._shader_node.GetInput(inp).GetDefaultValue()

     def set_input(self, inp, val):
         """
@@ -230,11 +236,14 @@ def set_input(self, inp, val):
             inp (str): Name of the shader input whose value will be set
             val (any): Value to set for the input. This should be the valid type for that attribute.
         """
-        # Make sure the input exists first, so we avoid segfaults with "invalid null prim"
-        assert (
-            inp in self.shader_input_names
-        ), f"Got invalid shader input to set! Current inputs are: {self.shader_input_names}. Got: {inp}"
-        self._shader.GetInput(inp).Set(val)
+        # Make sure the input exists first, so we avoid segfaults with "invalid null prim"
+        if inp in self.shader_input_names:
+            self._shader.GetInput(inp).Set(val)
+        elif inp in self.shader_default_input_names:
+            input_type = get_sdf_value_type_name(val)
+            self._shader.CreateInput(inp, input_type).Set(val)
+        else:
+            raise ValueError(f"Got invalid shader input to set! Got: {inp}")

     @property
     def is_glass(self):
@@ -242,7 +251,7 @@ def is_glass(self):
         """
         Returns:
             bool: Whether this material is a glass material or not
         """
-        return "glass_color" in self.shader_input_names
+        return "glass_color" in self.shader_input_names | self.shader_default_input_names

     @property
     def shader(self):
@@ -260,15 +269,34 @@ def shader_input_names(self):
         """
         return {inp.GetBaseName() for inp in self._shader.GetInputs()}

-    def shader_input_names_by_type(self, input_type):
+    @property
+    def shader_default_input_names(self):
+        """
+        Returns:
+            set: All the shader input names associated with this material that have default values
+        """
+        return set(self._shader_node.GetInputNames())
+
+    def get_shader_input_names_by_type(self, input_type, include_default=False):
         """
         Args:
             input_type (str): input type
+            include_default (bool): whether to include default inputs

         Returns:
             set: All the shader input names associated with this material that match the given input type
         """
-        return {inp.GetBaseName() for inp in self._shader.GetInputs() if inp.GetTypeName().cppTypeName == input_type}
+        shader_input_names = {
+            inp.GetBaseName() for inp in self._shader.GetInputs() if inp.GetTypeName().cppTypeName == input_type
+        }
+        if not include_default:
+            return shader_input_names
+        shader_default_input_names = {
+            inp_name
+            for inp_name in self.shader_default_input_names
+            if self._shader_node.GetInput(inp_name).GetType() == input_type
+        }
+        return shader_input_names | shader_default_input_names

     @property
     def diffuse_color_constant(self):
diff --git a/omnigibson/robots/holonomic_base_robot.py b/omnigibson/robots/holonomic_base_robot.py
index 89abac3ee..c7aa370f4 100644
--- a/omnigibson/robots/holonomic_base_robot.py
+++ b/omnigibson/robots/holonomic_base_robot.py
@@ -331,7 +331,7 @@ def _postprocess_control(self, control, control_type):

     def teleop_data_to_action(self, teleop_action) -> th.Tensor:
         action = ManipulationRobot.teleop_data_to_action(self, teleop_action)
-        action[self.base_action_idx] = th.tensor(teleop_action.base).float() * 0.1
+        action[self.base_action_idx] = teleop_action.base.float()
         return action

     @cached_property
diff --git a/omnigibson/robots/manipulation_robot.py b/omnigibson/robots/manipulation_robot.py
index 752447992..da3cf6bf1 100644
--- a/omnigibson/robots/manipulation_robot.py
+++ b/omnigibson/robots/manipulation_robot.py
@@ -1598,12 +1598,12 @@ def teleop_data_to_action(self, teleop_action) -> th.Tensor:
         hands = ["left", "right"] if self.n_arms == 2 else ["right"]
         for i, hand in enumerate(hands):
             arm_name = self.arm_names[i]
-            arm_action = th.tensor(teleop_action[hand]).float()
+            arm_action = teleop_action[hand]
             # arm action
             assert isinstance(self._controllers[f"arm_{arm_name}"], InverseKinematicsController) or isinstance(
                 self._controllers[f"arm_{arm_name}"], OperationalSpaceController
             ), f"Only IK and OSC controllers are supported for arm {arm_name}!"
- target_pos, target_orn = arm_action[:3], T.quat2axisangle(T.euler2quat(arm_action[3:6])) + target_pos, target_orn = arm_action[:3], arm_action[3:6] action[self.arm_action_idx[arm_name]] = th.cat((target_pos, target_orn)) # gripper action assert isinstance( diff --git a/omnigibson/robots/r1.py b/omnigibson/robots/r1.py index 3321192c4..c9c4259c5 100644 --- a/omnigibson/robots/r1.py +++ b/omnigibson/robots/r1.py @@ -251,3 +251,9 @@ def disabled_collision_pairs(self): ["base_link", "wheel_link2"], ["base_link", "wheel_link3"], ] + + def teleop_data_to_action(self, teleop_action) -> th.Tensor: + action = HolonomicBaseRobot.teleop_data_to_action(self, teleop_action) + # print("Torso action: ", teleop_action.torso) + # action[self.trunk_action_idx][2] = teleop_action.torso + return action diff --git a/omnigibson/simulator.py b/omnigibson/simulator.py index ebfc01508..67ca2915d 100644 --- a/omnigibson/simulator.py +++ b/omnigibson/simulator.py @@ -73,6 +73,7 @@ m.KIT_FILES = { (4, 0, 0): "omnigibson_4_0_0.kit", (4, 1, 0): "omnigibson_4_1_0.kit", + (4, 2, 0): "omnigibson_4_2_0.kit", } @@ -158,7 +159,6 @@ def _launch_app(): # Enable additional extensions we need lazy.omni.isaac.core.utils.extensions.enable_extension("omni.flowusd") - lazy.omni.isaac.core.utils.extensions.enable_extension("omni.particle.system.bundle") # Additional import for windows if os.name == "nt": diff --git a/omnigibson/systems/micro_particle_system.py b/omnigibson/systems/micro_particle_system.py index a9322638a..0f08befb2 100644 --- a/omnigibson/systems/micro_particle_system.py +++ b/omnigibson/systems/micro_particle_system.py @@ -494,9 +494,6 @@ def initialize(self, scene): self.system_prim = self._create_particle_system() # Get material material = self._get_particle_material_template() - # Load the material if it's newly created and has never been loaded before - if not material.loaded: - material.load() material.add_user(self) self._material = material # Bind the material to the particle system (for isosurface) and the prototypes (for non-isosurface) diff --git a/omnigibson/tasks/behavior_task.py b/omnigibson/tasks/behavior_task.py index 1f8d2c65d..310c18bfb 100644 --- a/omnigibson/tasks/behavior_task.py +++ b/omnigibson/tasks/behavior_task.py @@ -359,13 +359,16 @@ def assign_object_scope_with_cache(self, env): if obj_inst in self.future_obj_instances: entity = None else: - assert obj_inst in inst_to_name, ( - f"BDDL object instance {obj_inst} should exist in cached metadata " - f"from loaded scene, but could not be found!" - ) - name = inst_to_name[obj_inst] - is_system = name in env.scene.available_systems.keys() - entity = env.scene.get_system(name) if is_system else env.scene.object_registry("name", name) + if obj_inst == "agent.n.01_1": + entity = self.get_agent(env) + else: + assert obj_inst in inst_to_name, ( + f"BDDL object instance {obj_inst} should exist in cached metadata " + f"from loaded scene, but could not be found!" 
+ ) + name = inst_to_name[obj_inst] + is_system = name in env.scene.available_systems.keys() + entity = env.scene.get_system(name) if is_system else env.scene.object_registry("name", name) self.object_scope[obj_inst] = BDDLEntity( bddl_inst=obj_inst, entity=entity, diff --git a/omnigibson/utils/config_utils.py b/omnigibson/utils/config_utils.py index dd467c382..8012da699 100644 --- a/omnigibson/utils/config_utils.py +++ b/omnigibson/utils/config_utils.py @@ -2,6 +2,7 @@ import json import os +import numpy as np import torch as th import yaml @@ -74,6 +75,6 @@ def load_default_config(): class TorchEncoder(json.JSONEncoder): def default(self, obj): - if isinstance(obj, th.Tensor): + if isinstance(obj, (th.Tensor, np.ndarray)): return obj.tolist() return json.JSONEncoder.default(self, obj) diff --git a/omnigibson/utils/constants.py b/omnigibson/utils/constants.py index 4cd7b8ab8..5680d54fc 100644 --- a/omnigibson/utils/constants.py +++ b/omnigibson/utils/constants.py @@ -76,6 +76,7 @@ class PrimType(IntEnum): class EmitterType(IntEnum): FIRE = 0 STEAM = 1 + MIST = 2 # Valid primitive mesh types diff --git a/omnigibson/utils/deprecated_utils.py b/omnigibson/utils/deprecated_utils.py index 740407476..a5933ac9e 100644 --- a/omnigibson/utils/deprecated_utils.py +++ b/omnigibson/utils/deprecated_utils.py @@ -20,8 +20,6 @@ from omni.isaac.core.utils.prims import get_prim_at_path, get_prim_parent from omni.kit.primitive.mesh.command import CreateMeshPrimWithDefaultXformCommand as CMPWDXC from omni.kit.primitive.mesh.command import _get_all_evaluators -from omni.particle.system.core.scripts.core import Core as OmniCore -from omni.particle.system.core.scripts.utils import Utils as OmniUtils from omni.replicator.core import random_colours from PIL import Image, ImageDraw from pxr import PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics, UsdShade @@ -82,96 +80,6 @@ def __init__(self, prim_type: str, **kwargs): assert isinstance(self._evaluator_class, type) -class Utils(OmniUtils): - def create_material(self, name): - material_url = carb.settings.get_settings().get("/exts/omni.particle.system.core/material") - - # TODO: THIS IS THE ONLY LINE WE CHANGE! "/" SHOULD BE "" - material_path = "" - default_prim = self.stage.GetDefaultPrim() - if default_prim: - material_path = default_prim.GetPath().pathString - - if not self.stage.GetPrimAtPath(material_path + "/Looks"): - self.stage.DefinePrim(material_path + "/Looks", "Scope") - material_path += "/Looks/" + name - material_path = ou.get_stage_next_free_path(self.stage, material_path, False) - prim = self.stage.DefinePrim(material_path, "Material") - if material_url: - prim.GetReferences().AddReference(material_url) - else: - carb.log_error("Failed to find material URL in settings") - - return [material_path] - - -class Core(OmniCore): - """ - Subclass that overrides a specific function within Omni's Core class to fix a bug - """ - - def __init__(self, popup_callback: Callable[[str], None], particle_system_name: str): - self._popup_callback = popup_callback - self.utils = Utils() - self.context = ou.get_context() - self.stage = self.context.get_stage() - self.selection = self.context.get_selection() - self.particle_system_name = particle_system_name - self.sub_stage_update = self.context.get_stage_event_stream().create_subscription_to_pop(self.on_stage_update) - self.on_stage_update() - - def get_compute_graph(self, selected_paths, create_new_graph=True, created_paths=None): - """ - Returns the first ComputeGraph found in selected_paths. 
-        If no graph is found and create_new_graph is true, a new graph will be created and its
-        path appended to created_paths (if provided).
-        """
-        graph = None
-        graph_paths = [
-            path
-            for path in selected_paths
-            if self.stage.GetPrimAtPath(path).GetTypeName() in ["ComputeGraph", "OmniGraph"]
-        ]
-
-        if len(graph_paths) > 0:
-            graph = ogc.get_graph_by_path(graph_paths[0])
-            if len(graph_paths) > 1:
-                carb.log_warn(
-                    f"Multiple ComputeGraph prims selected. Only the first will be used: {graph.get_path_to_graph()}"
-                )
-        elif create_new_graph:
-            # If no graph was found in the selected prims, we'll make a new graph.
-            # TODO: THIS IS THE ONLY LINE THAT WE CHANGE! ONCE FIXED, REMOVE THIS
-            graph_path = Sdf.Path(f"/OmniGraph/{self.particle_system_name}").MakeAbsolutePath(Sdf.Path.absoluteRootPath)
-            graph_path = ou.get_stage_next_free_path(self.stage, graph_path, True)
-
-            # prim = self.stage.GetDefaultPrim()
-            # path = str(prim.GetPath()) if prim else ""
-            self.stage.DefinePrim("/OmniGraph", "Scope")
-
-            container_graphs = ogc.get_global_container_graphs()
-            # FIXME: container_graphs[0] should be the simulation orchestration graph, but this may change in the future.
-            container_graph = container_graphs[0]
-            result, wrapper_node = ogc.cmds.CreateGraphAsNode(
-                graph=container_graph,
-                node_name=Sdf.Path(graph_path).name,
-                graph_path=graph_path,
-                evaluator_name="push",
-                is_global_graph=True,
-                backed_by_usd=True,
-                fc_backing_type=ogc.GraphBackingType.GRAPH_BACKING_TYPE_FLATCACHE_SHARED,
-                pipeline_stage=ogc.GraphPipelineStage.GRAPH_PIPELINE_STAGE_SIMULATION,
-            )
-            graph = wrapper_node.get_wrapped_graph()
-
-            if created_paths is not None:
-                created_paths.append(graph.get_path_to_graph())
-
-            carb.log_info(f"No ComputeGraph selected. A new graph has been created at {graph.get_path_to_graph()}")
-
-        return graph
-
-
 class ArticulationView(_ArticulationView):
     """ArticulationView with some additional functionality implemented."""
diff --git a/omnigibson/utils/teleop_utils.py b/omnigibson/utils/teleop_utils.py
index 9a48eec31..35d9adb1b 100644
--- a/omnigibson/utils/teleop_utils.py
+++ b/omnigibson/utils/teleop_utils.py
@@ -1,5 +1,7 @@
 import time
-from typing import Iterable, Optional, Tuple
+from collections.abc import Iterable
+from dataclasses import dataclass, field
+from typing import Literal, Optional, Tuple

 import torch as th

@@ -8,20 +10,48 @@
 import omnigibson.utils.transform_utils as T
 from omnigibson.macros import create_module_macros
 from omnigibson.objects import USDObject
+from omnigibson.prims.geom_prim import VisualGeomPrim
+from omnigibson.prims.xform_prim import XFormPrim
 from omnigibson.robots.robot_base import BaseRobot
+from omnigibson.sensors import VisionSensor
+from omnigibson.utils.control_utils import IKSolver
+from omnigibson.utils.ui_utils import KeyboardEventHandler, create_module_logger
+from omnigibson.utils.usd_utils import scene_relative_prim_path_to_absolute

 try:
     from telemoma.configs.base_config import teleop_config
-    from telemoma.human_interface.teleop_core import TeleopAction, TeleopObservation
+
+    # from telemoma.human_interface.teleop_core import TeleopAction, TeleopObservation
     from telemoma.human_interface.teleop_policy import TeleopPolicy
     from telemoma.utils.general_utils import AttrDict
 except ImportError as e:
     raise e from ValueError("For teleoperation, install telemoma by running 'pip install telemoma'")

+# Create module logger
+log = create_module_logger(module_name=__name__)
+
 m = create_module_macros(module_path=__file__)

 m.movement_speed
= 0.2 # the speed of the robot base movement +@dataclass +class TeleopAction(AttrDict): + left: th.Tensor = field(default_factory=lambda: th.cat((th.zeros(6), th.ones(1)))) + right: th.Tensor = field(default_factory=lambda: th.cat((th.zeros(6), th.ones(1)))) + base: th.Tensor = field(default_factory=lambda: th.zeros(3)) + torso: float = field(default_factory=lambda: 0.0) + extra: dict = field(default_factory=dict) + + +@dataclass +class TeleopObservation(AttrDict): + left: th.Tensor = field(default_factory=lambda: th.cat((th.zeros(6), th.ones(2)))) + right: th.Tensor = field(default_factory=lambda: th.cat((th.zeros(6), th.ones(2)))) + base: th.Tensor = field(default_factory=lambda: th.zeros(3)) + torso: float = field(default_factory=lambda: 0.0) + extra: dict = field(default_factory=dict) + + class TeleopSystem(TeleopPolicy): """ Base class for teleop policy @@ -36,10 +66,10 @@ def __init__(self, config: AttrDict, robot: BaseRobot, show_control_marker: bool show_control_marker (bool): whether to show a visual marker that indicates the target pose of the control. """ super().__init__(config) - self.teleop_action: TeleopAction = TeleopAction() - self.robot_obs: TeleopObservation = TeleopObservation() + self.teleop_action = TeleopAction() + self.robot_obs = TeleopObservation() self.robot = robot - self.robot_arms = ["left", "right"] if self.robot.n_arms == 2 else ["right"] + self.robot_arms = None if not self.robot else ["left", "right"] if self.robot.n_arms == 2 else ["right"] # robot parameters self.movement_speed = m.movement_speed self.show_control_marker = show_control_marker @@ -119,9 +149,10 @@ def __init__( show_control_marker: bool = False, system: str = "SteamVR", disable_display_output: bool = False, - enable_touchpad_movement: bool = False, - align_anchor_to_robot_base: bool = False, - use_hand_tracking: bool = False, + eef_tracking_mode: Literal["controller", "hand", "disabled"] = "controller", + # TODO: fix this to only take something like a prim path + align_anchor_to: Literal["camera", "base", "touchpad"] = "camera", + view_angle_limits: Optional[Iterable[float]] = None, ) -> None: """ Initializes the VR system @@ -130,32 +161,34 @@ def __init__( show_control_marker (bool): whether to show a control marker system (str): the VR system to use, one of ["OpenXR", "SteamVR"], default is "SteamVR". disable_display_output (bool): whether we will not display output to the VR headset (only use controller tracking), default is False. - enable_touchpad_movement (bool): whether to enable VR system anchor movement by controller, default is False. - align_anchor_to_robot_base (bool): whether to align VR anchor to robot base, default is False. - use_hand_tracking (bool): whether to use hand tracking instead of controllers, default is False. - show_controller (bool): whether to show the controller model in the scene, default is False. - - NOTE: enable_touchpad_movement and align_anchor_to_robot_base cannot be enabled at the same time. - The former is to enable free movement of the VR system (i.e. the user), while the latter is constraining the VR system to the robot pose. - """ + eef_tracking_mode (Literal): whether to use controller tracking or hand tracking, one of ["controller", "hand", "disabled"], default is controller. + align_anchor_to (Literal): specify where the VR view aligns to, one of ["camera", "base", "touchpad"], defaults to robot camera. + The "touchpad" option enables free movement of the VR view (i.e. 
+            view_angle_limits (Iterable): the view angle limits for the VR system (roll, pitch, and yaw) in degrees, default is None.
+        """
+        align_to_prim = isinstance(align_anchor_to, XFormPrim)
+        assert (
+            align_anchor_to
+            in [
+                "camera",
+                "base",
+                "touchpad",
+            ]
+            or align_to_prim
+        ), "align_anchor_to must be one of ['camera', 'base', 'touchpad'] or an XFormPrim"
+        self.align_anchor_to = align_anchor_to
+        self.anchor_prim = None
+        if align_to_prim:
+            self.set_anchor_with_prim(self.align_anchor_to)
         self.raw_data = {}
         # enable xr extension
         lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.xr.profile.vr")
-        self.xr_device_class = lazy.omni.kit.xr.core.XRDeviceClass
         # run super method
         super().__init__(teleop_config, robot, show_control_marker)
-        # we want to further slow down the movement speed if we are using touchpad movement
-        if enable_touchpad_movement:
-            self.movement_speed *= 0.3
         # get xr core and profile
         self.xr_core = lazy.omni.kit.xr.core.XRCore.get_singleton()
         self.vr_profile = self.xr_core.get_profile("vr")
         self.disable_display_output = disable_display_output
-        self.enable_touchpad_movement = enable_touchpad_movement
-        self.align_anchor_to_robot_base = align_anchor_to_robot_base
-        assert not (
-            self.enable_touchpad_movement and self.align_anchor_to_robot_base
-        ), "enable_touchpad_movement and align_anchor_to_robot_base cannot be True at the same time!"
         # set avatar
         if self.show_control_marker:
             self.vr_profile.set_avatar(
@@ -166,9 +199,12 @@
             lazy.omni.kit.xr.ui.stage.common.XRAvatarManager.get_singleton().create_avatar("empty_avatar", {})
         )
         # set anchor mode to be custom anchor
         lazy.carb.settings.get_settings().set(
-            self.vr_profile.get_scene_persistent_path() + "anchorMode", "scene origin"
+            self.vr_profile.get_scene_persistent_path() + "anchorMode", "custom_anchor"
         )
+        # set override leveled basis to be true (if this is false, the headset would not track anchor pitch orientation)
+        lazy.carb.settings.get_settings().set(self.vr_profile.get_persistent_path() + "overrideLeveledBasis", True)
         # set vr system
         lazy.carb.settings.get_settings().set(self.vr_profile.get_persistent_path() + "system/display", system)
         # set display mode
@@ -180,17 +216,130 @@
         self.hmd = None
         self.controllers = {}
         self.trackers = {}
-        self.xr2og_orn_offset = th.tensor([0.5, -0.5, -0.5, -0.5])
-        self.og2xr_orn_offset = th.tensor([-0.5, 0.5, 0.5, -0.5])
         # setup event subscriptions
         self.reset()
-        self.use_hand_tracking = use_hand_tracking
-        if use_hand_tracking:
+        self.eef_tracking_mode = eef_tracking_mode
+        if eef_tracking_mode == "hand":
             self.raw_data["hand_data"] = {}
             self.teleop_action.hand_data = {}
             self._hand_tracking_subscription = self.xr_core.get_event_stream().create_subscription_to_pop_by_type(
                 lazy.omni.kit.xr.core.XRCoreEventType.hand_joints, self._update_hand_tracking_data, name="hand tracking"
             )
+        self.robot_cameras = (
+            [s for s in self.robot.sensors.values() if isinstance(s, VisionSensor)] if self.robot else []
+        )
+        # TODO: this camera id is specific to R1, because it has 3 cameras, we need to make this more general
+        self.active_camera_id = 2
+        if self.align_anchor_to == "camera" and len(self.robot_cameras) == 0:
+            raise ValueError("No camera found on robot, cannot align anchor to camera")
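A hedged sketch of constructing the VR teleop system under this new signature (the class name OVXRSystem and the already-loaded robot/environment are assumptions; values are illustrative):

# "robot" is assumed to be an already-imported, loaded BaseRobot instance, and
# OVXRSystem is assumed to be the VR teleop class this __init__ belongs to.
vrsys = OVXRSystem(
    robot=robot,
    system="SteamVR",
    eef_tracking_mode="controller",   # or "hand" / "disabled"
    align_anchor_to="camera",         # or "base" / "touchpad" / an XFormPrim
    view_angle_limits=[180, 30, 45],  # roll / pitch / yaw limits, in degrees
)
vrsys.start()
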
+        # we want to further slow down the movement speed if we are using touchpad movement
+        if self.align_anchor_to == "touchpad":
+            self.movement_speed *= 0.3
+        # Per-arm IK solvers, needed by snap_eef_to_controller() below
+        self.global_ik_solver = {}
+        for arm in self.robot_arms or []:
+            control_idx = self.robot.arm_control_idx[arm]
+            self.global_ik_solver[arm] = IKSolver(
+                robot_description_path=self.robot.robot_arm_descriptor_yamls[arm],
+                robot_urdf_path=self.robot.urdf_path,
+                reset_joint_pos=self.robot.get_joint_positions()[control_idx],
+                eef_name=self.robot.eef_link_names[arm],
+            )
+        self.head_canonical_transformation = None
+        KeyboardEventHandler.add_keyboard_callback(
+            key=lazy.carb.input.KeyboardInput.R,
+            callback_fn=self.register_head_canonical_transformation,
+        )
+        self._update_camera_callback = self.xr_core.get_event_stream().create_subscription_to_pop_by_type(
+            lazy.omni.kit.xr.core.XRCoreEventType.pre_render_update, self._update_camera_pose, name="update camera"
+        )
+        self._view_blackout_prim = None
+        self._view_angle_limits = (
+            [T.deg2rad(limit) for limit in view_angle_limits] if view_angle_limits is not None else None
+        )
+        if self._view_angle_limits is not None:
+            scene = self.robot.scene
+            blackout_relative_path = "/view_blackout"
+            blackout_prim_path = scene_relative_prim_path_to_absolute(scene, blackout_relative_path)
+            blackout_sphere = lazy.pxr.UsdGeom.Sphere.Define(og.sim.stage, blackout_prim_path)
+            blackout_sphere.CreateRadiusAttr().Set(0.1)
+            # displayColor expects normalized (0-1) RGB Vec3f entries; (1, 1, 1) is white
+            blackout_sphere.CreateDisplayColorAttr().Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(1.0, 1.0, 1.0)]))
+            self._view_blackout_prim = VisualGeomPrim(
+                relative_prim_path=blackout_relative_path,
+                name="view_blackout",
+            )
+            self._view_blackout_prim.load(scene)
+            self._view_blackout_prim.initialize()
+            self._view_blackout_prim.visible = False
+
+        self._current_trunk_height = 0.5  # Initial height
+
+    def _update_camera_pose(self, e) -> None:
+        if self.align_anchor_to == "touchpad":
+            # we use x, y from right controller for 2d movement and y from left controller for z movement
+            self._move_anchor(
+                pos_offset=th.cat((th.tensor([self.teleop_action.torso]), self.teleop_action.base[[0, 2]]))
+            )
+        else:
+            if self.anchor_prim is not None:
+                reference_frame = self.anchor_prim
+            elif self.align_anchor_to == "camera":
+                reference_frame = self.robot_cameras[self.active_camera_id]
+            elif self.align_anchor_to == "base":
+                reference_frame = self.robot
+            else:
+                raise ValueError(f"Invalid anchor: {self.align_anchor_to}")
+
+            anchor_pos, anchor_orn = reference_frame.get_position_orientation()
+
+            if self.head_canonical_transformation is not None:
+                current_head_physical_world_pose = self.xr2og(self.hmd.get_physical_world_pose())
+                # Find the orientation change from canonical to current physical orientation
+                _, relative_orientation = T.relative_pose_transform(
+                    *current_head_physical_world_pose, *self.head_canonical_transformation
+                )
+                anchor_orn = T.quat_multiply(anchor_orn, relative_orientation)
+
+                if self._view_blackout_prim is not None:
+                    relative_ori_in_euler = T.quat2euler(relative_orientation)
+                    roll_limit, pitch_limit, yaw_limit = self._view_angle_limits
+                    # OVXR has a different coordinate system than OmniGibson, hence the index permutation
+                    if (
+                        abs(relative_ori_in_euler[0]) > pitch_limit
+                        or abs(relative_ori_in_euler[1]) > yaw_limit
+                        or abs(relative_ori_in_euler[2]) > roll_limit
+                    ):
+                        self._view_blackout_prim.set_position_orientation(anchor_pos, anchor_orn)
+                        self._view_blackout_prim.visible = True
+                    else:
+                        self._view_blackout_prim.visible = False
+
+            anchor_pose = self.og2xr(anchor_pos, anchor_orn)
+            self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device(
+                anchor_pose.numpy(), self.hmd
+            )
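The head-canonical logic above offsets the anchor orientation by however far the headset has rotated away from its registered canonical pose. A small sketch of that composition, assuming T.relative_pose_transform(p1, q1, p2, q2) returns the pose of frame 1 expressed in frame 2, quaternions are (x, y, z, w), and T.euler2quat exists with the usual (roll, pitch, yaw) convention:

import torch as th
import omnigibson.utils.transform_utils as T

# Canonical (forward-facing) head pose and a current pose pitched ~17 degrees
canonical = (th.zeros(3), th.tensor([0.0, 0.0, 0.0, 1.0]))
current = (th.zeros(3), T.euler2quat(th.tensor([0.0, 0.3, 0.0])))

# Rotation of the current head pose relative to the canonical one...
_, relative_orn = T.relative_pose_transform(*current, *canonical)

# ...composed on top of the anchor (e.g. robot camera) orientation
anchor_orn = th.tensor([0.0, 0.0, 0.0, 1.0])
anchor_orn = T.quat_multiply(anchor_orn, relative_orn)
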
+    def register_head_canonical_transformation(self):
+        """
+        Record the current physical headset pose as the canonical (forward-facing) transformation:
+        1) the user presses a button (R) while the head (and GELLO) is facing forward to record this pose;
+        2) when the user then turns their head, the orientation change R from canonical to current is computed;
+        3) the virtual view is set to the anchor orientation composed with R, at the same position.
+        """
+        if self.hmd is None:
+            log.warning("No HMD found, cannot register head canonical orientation")
+            return
+        self.head_canonical_transformation = self.xr2og(self.hmd.get_physical_world_pose())
+
+    def set_anchor_with_prim(self, prim) -> None:
+        """
+        Set the anchor to a prim
+        Args:
+            prim (BasePrim): the prim to set the anchor to
+        """
+        self.anchor_prim = prim

     def xr2og(self, transform: th.tensor) -> Tuple[th.tensor, th.tensor]:
         """
@@ -203,7 +352,6 @@
             tuple(th.tensor, th.Tensor): the position and orientation in the OmniGibson coordinate system
         """
         pos, orn = T.mat2pose(th.tensor(transform).T)
-        orn = T.quat_multiply(orn, self.xr2og_orn_offset)
         return pos, orn

     def og2xr(self, pos: th.tensor, orn: th.tensor) -> th.Tensor:
         """
@@ -215,7 +363,6 @@
         Returns:
             th.tensor: the transform matrix in the Omniverse XR coordinate system
         """
-        orn = T.quat_multiply(self.og2xr_orn_offset, orn)
         return T.pose2mat((pos, orn)).T.double()

     def reset(self) -> None:
@@ -249,6 +396,14 @@
             print("[VRSys] Waiting for VR headset to become active...")
             self._update_devices()
             if self.hmd is not None:
+                print("[VRSys] VR headset connected, put on the headset to start")
+                # Note that stepping the vr system multiple times is necessary here due to a bug in the OVXR plugin
+                for _ in range(50):
+                    self.update()
+                    og.sim.step()
+                self.reset_head_transform()
+                print("[VRSys] VR system is ready")
+                self.register_head_canonical_transformation()
                 break
             time.sleep(1)
             og.sim.step()
@@ -261,31 +416,37 @@
             og.sim.step()
         assert not self.vr_profile.is_enabled(), "[VRSys] VR profile not disabled!"
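With the fixed quaternion offsets removed, the xr2og/og2xr converters defined above reduce to a transposed matrix/pose round trip. A quick sketch, assuming T.pose2mat and T.mat2pose are inverses and quaternions are (x, y, z, w):

import torch as th
import omnigibson.utils.transform_utils as T

pos = th.tensor([1.0, 2.0, 0.5], dtype=th.float64)
orn = th.tensor([0.0, 0.0, 0.0, 1.0], dtype=th.float64)

xr_mat = T.pose2mat((pos, orn)).T.double()  # og2xr: transposed 4x4 double matrix
pos2, orn2 = T.mat2pose(xr_mat.T)           # xr2og: transpose back and unpack
assert th.allclose(pos, pos2) and th.allclose(orn, orn2)
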
-    def update(self) -> None:
+    def update(self, optimized_for_tour=False) -> None:
         """
         Steps the VR system and update self.teleop_action
+
+        Args:
+            optimized_for_tour (bool): if True, skip updating device lists, device transforms, and head data
+                for a lighter-weight update
         """
         # update raw data
-        self._update_devices()
-        self._update_device_transforms()
+        if not optimized_for_tour:
+            self._update_devices()
+            self._update_device_transforms()
         self._update_button_data()
-        # Update teleop data based on controller input if not using hand tracking
-        if not self.use_hand_tracking:
-            self.teleop_action.base = th.zeros(3)
-            self.teleop_action.torso = 0.0
-            # update right hand related info
+        # Update teleop data based on controller input
+        if self.eef_tracking_mode == "controller":
+            # update eef related info
             for arm_name, arm in zip(["left", "right"], self.robot_arms):
                 if arm in self.controllers:
+                    controller_pose_in_robot_frame = self._pose_in_robot_frame(
+                        self.raw_data["transforms"]["controllers"][arm][0],
+                        self.raw_data["transforms"]["controllers"][arm][1],
+                    )
                     self.teleop_action[arm_name] = th.cat(
                         (
-                            self.raw_data["transforms"]["controllers"][arm][0],
-                            T.quat2euler(
+                            controller_pose_in_robot_frame[0],
+                            T.quat2axisangle(
                                 T.quat_multiply(
-                                    self.raw_data["transforms"]["controllers"][arm][1],
-                                    self.robot.teleop_rotation_offset[self.robot.arm_names[self.robot_arms.index(arm)]],
+                                    controller_pose_in_robot_frame[1],
+                                    self.robot.teleop_rotation_offset[arm_name],
                                 )
                             ),
-                            [self.raw_data["button_data"][arm]["axis"]["trigger"]],
+                            # When the trigger is pressed, this value reads 1.0, otherwise 0.0.
+                            # Our multi-finger gripper controller closes the gripper at -1.0 and
+                            # opens it when the value is > 0.0, so we negate the value here.
+                            th.tensor([-self.raw_data["button_data"][arm]["axis"]["trigger"]], dtype=th.float32),
                         )
                     )
                     self.teleop_action.is_valid[arm_name] = self._is_valid_transform(
@@ -293,44 +454,35 @@
                    )
                else:
                    self.teleop_action.is_valid[arm_name] = False
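For reference, the per-arm command assembled above is a flat 7-vector: position (3), axis-angle orientation (3), and the negated trigger (1). A sketch of that layout with illustrative values:

import torch as th

arm_action = th.cat(
    (
        th.tensor([0.4, -0.1, 0.9]),          # eef position in the robot frame
        th.tensor([0.0, 0.0, 1.57]),          # eef orientation as axis-angle, robot frame
        th.tensor([-1.0], dtype=th.float32),  # negated trigger: -1.0 closes the gripper
    )
)
assert arm_action.shape == (7,)
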
-        # update base and reset info
-        if "right" in self.controllers:
-            self.teleop_action.reset["right"] = self.raw_data["button_data"]["right"]["press"]["grip"]
-            right_axis = self.raw_data["button_data"]["right"]["axis"]
-            self.teleop_action.base[0] = right_axis["touchpad_y"] * self.movement_speed
-            self.teleop_action.torso = -right_axis["touchpad_x"] * self.movement_speed
-        if "left" in self.controllers:
-            self.teleop_action.reset["left"] = self.raw_data["button_data"]["left"]["press"]["grip"]
-            left_axis = self.raw_data["button_data"]["left"]["axis"]
-            self.teleop_action.base[1] = -left_axis["touchpad_x"] * self.movement_speed
-            self.teleop_action.base[2] = left_axis["touchpad_y"] * self.movement_speed
-        # update head related info
-        self.teleop_action.head = th.cat(
-            (self.raw_data["transforms"]["head"][0], th.tensor(T.quat2euler(self.raw_data["transforms"]["head"][1])))
-        )
-        self.teleop_action.is_valid["head"] = self._is_valid_transform(self.raw_data["transforms"]["head"])
-        # Optionally move anchor
-        if self.enable_touchpad_movement:
-            # we use x, y from right controller for 2d movement and y from left controller for z movement
-            self._move_anchor(
-                pos_offset=th.cat((th.tensor([self.teleop_action.torso]), self.teleop_action.base[[0, 2]]))
+
+        # update base, torso, and reset info
+        self.teleop_action.base = th.zeros(3)
+        self.teleop_action.torso = 0.0
+        for controller_name in self.controllers.keys():
+            self.teleop_action.reset[controller_name] = self.raw_data["button_data"][controller_name]["press"]["grip"]
+            axis = self.raw_data["button_data"][controller_name]["axis"]
+            if controller_name == "right":
+                self.teleop_action.base[0] = axis["touchpad_y"] * self.movement_speed
+                self.teleop_action.torso = -axis["touchpad_x"] * self.movement_speed
+            elif controller_name == "left":
+                self.teleop_action.base[1] = -axis["touchpad_x"] * self.movement_speed
+                self.teleop_action.base[2] = axis["touchpad_y"] * self.movement_speed
+        if not optimized_for_tour:
+            # update head related info
+            self.teleop_action.head = th.cat(
+                (self.raw_data["transforms"]["head"][0], T.quat2euler(self.raw_data["transforms"]["head"][1]))
             )
-        if self.align_anchor_to_robot_base:
-            robot_base_pos, robot_base_orn = self.robot.get_position_orientation()
-            self.vr_profile.set_virtual_world_anchor_transform(self.og2xr(robot_base_pos, robot_base_orn[[0, 2, 1, 3]]))
+            self.teleop_action.is_valid["head"] = self._is_valid_transform(self.raw_data["transforms"]["head"])

-    def teleop_data_to_action(self) -> th.Tensor:
+    def get_robot_teleop_action(self) -> th.Tensor:
         """
-        Generate action data from VR input for robot teleoperation
+        Convert the most recent teleop_action into a robot action vector
         Returns:
-            th.tensor: array of action data
+            th.tensor: the flat action vector for the wrapped robot
         """
-        # optionally update control marker
-        if self.show_control_marker:
-            self.update_control_marker()
         return self.robot.teleop_data_to_action(self.teleop_action)

-    def reset_transform_mapping(self, arm: str = "right") -> None:
+    def snap_controller_to_eef(self, arm: str = "right") -> None:
         """
         Snap device to the robot end effector (ManipulationRobot only)
         Args:
@@ -342,21 +494,72 @@
         ].get_position_orientation()[0]
         target_transform = self.og2xr(pos=robot_eef_pos, orn=robot_base_orn)
         self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device(
-            target_transform, self.controllers[arm]
+            target_transform.numpy(), self.controllers[arm]
         )

-    def set_initial_transform(self, pos: Iterable[float], orn: Iterable[float] = [0, 0, 0, 1]) -> None:
+    def snap_eef_to_controller(self, arm: str = "right") -> None:
         """
-        Function that sets the initial transform of the VR system (w.r.t.) head
-        Note that stepping the vr system multiple times is necessary here due to a bug in OVXR plugin
+        Snap the robot end effector to the device via IK (ManipulationRobot only)
         Args:
-            pos(Iterable[float]): initial position of the vr system
-            orn(Iterable[float]): initial orientation of the vr system
+            arm (str): name of the arm, one of "left" or "right". Default is "right".
         """
-        for _ in range(10):
-            self.update()
-            og.sim.step()
-        self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device(self.og2xr(pos, orn), self.hmd)
+        self.robot.keep_still()
+        pos, quat = self.raw_data["transforms"]["controllers"][arm]
+        ik_solver = self.global_ik_solver[arm]
+        control_idx = self.robot.arm_control_idx[arm]
+        joint_pos = ik_solver.solve(
+            target_pos=pos,
+            target_quat=quat,
+            tolerance_pos=0.01,
+            tolerance_quat=0.01,
+            weight_pos=20.0,
+            weight_quat=0.05,
+            max_iterations=100,
+            initial_joint_pos=self.robot.get_joint_positions()[control_idx],
+        )
+        if joint_pos is not None:
+            self.robot.set_joint_positions(joint_pos, indices=control_idx, drive=False)
+            log.info("Snap succeeded.")
+        else:
+            log.warning("Snap failed. No IK solution found.")
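A hedged sketch of how these pieces fit together in a collection loop under the renamed API (the vrsys/env objects and their setup are assumptions; get_robot_teleop_action simply wraps robot.teleop_data_to_action as shown above):

# vrsys is the VR teleop system, env the OmniGibson environment (assumed set up);
# loop until interrupted.
while True:
    vrsys.update()                            # refresh raw device + button data
    action = vrsys.get_robot_teleop_action()  # map teleop_action -> robot action
    env.step(action)
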
+    def reset_head_transform(self) -> None:
+        """
+        Reset the transform of the VR system anchor with respect to the head
+        """
+        if self.align_anchor_to == "touchpad":
+            pos = th.tensor([0.0, 0.0, 1.0])
+            orn = th.tensor([0.0, 0.0, 0.0, 1.0])
+        else:
+            if self.anchor_prim is not None:
+                reference_frame = self.anchor_prim
+            elif self.align_anchor_to == "camera":
+                reference_frame = self.robot_cameras[self.active_camera_id]
+            elif self.align_anchor_to == "base":
+                reference_frame = self.robot
+            else:
+                raise ValueError(f"Invalid anchor: {self.align_anchor_to}")
+            pos, orn = reference_frame.get_position_orientation()
+        if self.robot:
+            self.robot.keep_still()
+        try:
+            self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device(
+                self.og2xr(pos, orn).numpy(), self.hmd
+            )
+        except Exception as e:
+            # The OVXR plugin can sporadically fail here; warn instead of silently swallowing the error
+            log.warning(f"Failed to reset head transform: {e}")
+
+    def _pose_in_robot_frame(self, pos: th.tensor, orn: th.tensor) -> Tuple[th.tensor, th.tensor]:
+        """
+        Get the pose in the robot frame
+        Args:
+            pos (th.tensor): the position in the world frame
+            orn (th.tensor): the orientation in the world frame
+        Returns:
+            tuple(th.tensor, th.tensor): the position and orientation in the robot frame
+        """
+        robot_base_pos, robot_base_orn = self.robot.get_position_orientation()
+        return T.relative_pose_transform(pos, orn, robot_base_pos, robot_base_orn)

     def _move_anchor(
         self, pos_offset: Optional[Iterable[float]] = None, rot_offset: Optional[Iterable[float]] = None
@@ -369,62 +572,62 @@
         """
         if pos_offset is not None:
             # note that x is forward, y is down, z is left for ovxr, but x is forward, y is left, z is up for og
-            pos_offset = th.tensor([-pos_offset[0], pos_offset[2], -pos_offset[1]]).double()
+            pos_offset = th.tensor([-pos_offset[0], pos_offset[2], -pos_offset[1]]).double().tolist()
             self.vr_profile.add_move_physical_world_relative_to_device(pos_offset)
         if rot_offset is not None:
-            rot_offset = th.tensor(rot_offset).double()
+            rot_offset = th.tensor(rot_offset).double().tolist()
             self.vr_profile.add_rotate_physical_world_around_device(rot_offset)

+    # TODO: check if this is necessary
     def _is_valid_transform(self, transform: Tuple[th.tensor, th.tensor]) -> bool:
         """
         Determine whether the transform is valid (ovxr plugin will return a zero position and rotation if not valid)
         """
-        return th.any(transform[0] != th.zeros(3)) and th.any(transform[1] != self.og2xr_orn_offset)
+        return th.any(transform[0] != th.zeros(3))

     def _update_devices(self) -> None:
         """
         Update the VR device list
         """
         for device in self.vr_profile.get_device_list():
-            if device.get_class() == self.xr_device_class.xrdisplaydevice:
+            device_class = device.get_class()
+            if device_class == lazy.omni.kit.xr.core.XRDeviceClass.xrdisplaydevice:
                 self.hmd = device
-            elif device.get_class() == self.xr_device_class.xrcontroller:
+            elif device_class == lazy.omni.kit.xr.core.XRDeviceClass.xrcontroller:
                 # we want the first 2 controllers to be corresponding to the left and right hand
                 d_idx = device.get_index()
                 controller_name = ["left", "right"][d_idx] if d_idx < 2 else f"controller_{d_idx+1}"
                 self.controllers[controller_name] = device
-            elif device.get_class() == self.xr_device_class.xrtracker:
+            elif device_class == lazy.omni.kit.xr.core.XRDeviceClass.xrtracker:
                 self.trackers[device.get_index()] = device
     def _update_device_transforms(self) -> None:
         """
         Get the transform matrix of each VR device *in world frame* and store in self.raw_data
         """
-        transforms = {}
-        transforms["head"] = self.xr2og(self.hmd.get_virtual_world_pose())
-        transforms["controllers"] = {}
-        transforms["trackers"] = {}
-        for controller_name in self.controllers:
-            transforms["controllers"][controller_name] = self.xr2og(
-                self.controllers[controller_name].get_virtual_world_pose()
-            )
-        for tracker_index in self.trackers:
-            transforms["trackers"][tracker_index] = self.xr2og(self.trackers[tracker_index].get_virtual_world_pose())
-        self.raw_data["transforms"] = transforms
+        assert self.hmd is not None, "VR headset device not found"
+        self.raw_data["transforms"] = {
+            "head": self.xr2og(self.hmd.get_virtual_world_pose()),
+            "controllers": {
+                name: self.xr2og(controller.get_virtual_world_pose()) for name, controller in self.controllers.items()
+            },
+            "trackers": {
+                index: self.xr2og(tracker.get_virtual_world_pose()) for index, tracker in self.trackers.items()
+            },
+        }

     def _update_button_data(self):
         """
         Get the button data for each controller and store in self.raw_data
-        Returns:
-            dict: a dictionary of whether each button is pressed or touched, and the axis state for touchpad and joysticks
-        """
-        button_data = {}
-        for controller_name in self.controllers:
-            button_data[controller_name] = {}
-            button_data[controller_name]["press"] = self.controllers[controller_name].get_button_press_state()
-            button_data[controller_name]["touch"] = self.controllers[controller_name].get_button_touch_state()
-            button_data[controller_name]["axis"] = self.controllers[controller_name].get_axis_state()
-        self.raw_data["button_data"] = button_data
+        """
+        self.raw_data["button_data"] = {
+            name: {
+                "press": controller.get_button_press_state(),
+                "touch": controller.get_button_touch_state(),
+                "axis": controller.get_axis_state(),
+            }
+            for name, controller in self.controllers.items()
+        }

     def _update_hand_tracking_data(self, e) -> None:
         """
diff --git a/omnigibson/utils/usd_utils.py b/omnigibson/utils/usd_utils.py
index 41c847fb7..e592fbf9f 100644
--- a/omnigibson/utils/usd_utils.py
+++ b/omnigibson/utils/usd_utils.py
@@ -1876,6 +1876,34 @@
     return True


+def get_sdf_value_type_name(val):
+    """
+    Determines the appropriate Sdf value type based on the input value.
+
+    Args:
+        val: The input value to determine the type for.
+
+    Returns:
+        lazy.pxr.Sdf.ValueTypeName: The corresponding Sdf value type.
+
+    Raises:
+        ValueError: If the input value type is not supported.
+    """
+    # Note: bool must be checked before int, since isinstance(True, int) is True
+    SDF_TYPE_MAPPING = {
+        lazy.pxr.Gf.Vec3f: lazy.pxr.Sdf.ValueTypeNames.Float3,
+        lazy.pxr.Gf.Vec2f: lazy.pxr.Sdf.ValueTypeNames.Float2,
+        lazy.pxr.Sdf.AssetPath: lazy.pxr.Sdf.ValueTypeNames.Asset,
+        bool: lazy.pxr.Sdf.ValueTypeNames.Bool,
+        int: lazy.pxr.Sdf.ValueTypeNames.Int,
+        float: lazy.pxr.Sdf.ValueTypeNames.Float,
+        str: lazy.pxr.Sdf.ValueTypeNames.String,
+    }
+    for type_, usd_type in SDF_TYPE_MAPPING.items():
+        if isinstance(val, type_):
+            return usd_type
+    raise ValueError(f"Unsupported input type: {type(val)}")
+
+
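A hedged usage sketch for the helper above — creating a correctly-typed USD attribute (the prim handle and attribute name are illustrative assumptions):

import omnigibson.lazy as lazy
from omnigibson.utils.usd_utils import get_sdf_value_type_name

# "prim" is an assumed, already-acquired lazy.pxr.Usd.Prim handle
val = lazy.pxr.Gf.Vec3f(1.0, 0.5, 0.0)
attr = prim.CreateAttribute("myColor", get_sdf_value_type_name(val))  # -> Float3
attr.Set(val)
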
+ """ + SDF_TYPE_MAPPING = { + lazy.pxr.Gf.Vec3f: lazy.pxr.Sdf.ValueTypeNames.Float3, + lazy.pxr.Gf.Vec2f: lazy.pxr.Sdf.ValueTypeNames.Float2, + lazy.pxr.Sdf.AssetPath: lazy.pxr.Sdf.ValueTypeNames.Asset, + int: lazy.pxr.Sdf.ValueTypeNames.Int, + float: lazy.pxr.Sdf.ValueTypeNames.Float, + bool: lazy.pxr.Sdf.ValueTypeNames.Bool, + str: lazy.pxr.Sdf.ValueTypeNames.String, + } + for type_, usd_type in SDF_TYPE_MAPPING.items(): + if isinstance(val, type_): + return usd_type + raise ValueError(f"Unsupported input type: {type(val)}") + + import omnigibson.utils.transform_utils as TT diff --git a/tests/test_robot_teleoperation.py b/tests/test_robot_teleoperation.py index 1e922b997..e1e92fdf5 100644 --- a/tests/test_robot_teleoperation.py +++ b/tests/test_robot_teleoperation.py @@ -45,7 +45,7 @@ def test_teleop(): # test moving robot arm teleop_action.right = th.cat(([0.01], th.zeros(6))) for _ in range(50): - action = robot.teleop_data_to_action(teleop_action) + action = robot.get_robot_teleop_action(teleop_action) env.step(action) cur_eef_pose = robot.links[robot.eef_link_names[robot.default_arm]].get_position_orientation() assert cur_eef_pose[0][0] - start_eef_pose[0][0] > 0.02, "Robot arm not moving forward" @@ -54,7 +54,7 @@ def test_teleop(): teleop_action.right = th.zeros(7) teleop_action.base = th.tensor([0.1, 0, 0.1]) for _ in range(50): - action = robot.teleop_data_to_action(teleop_action) + action = robot.get_robot_teleop_action(teleop_action) env.step(action) cur_base_pose = robot.get_position_orientation() assert cur_base_pose[0][0] - start_base_pose[0][0] > 0.02, "robot base not moving forward" diff --git a/tests/test_transform_utils.py b/tests/test_transform_utils.py index 318dfae81..d7eaac50f 100644 --- a/tests/test_transform_utils.py +++ b/tests/test_transform_utils.py @@ -358,7 +358,7 @@ def test_quat_slerp(self): key_rots = R.from_quat(np.stack([q1.cpu().numpy(), q2.cpu().numpy()])) key_times = [0, 1] slerp = Slerp(key_times, key_rots) - scipy_q_slerp = slerp([t.item()]).as_quat()[0].astype(NumpyTypes.FLOAT32) + scipy_q_slerp = slerp(t).as_quat()[0].astype(NumpyTypes.FLOAT32) assert quaternions_close(q_slerp, th.from_numpy(scipy_q_slerp)) assert_close(th.norm(q_slerp), th.tensor(1.0))