Upgrade to Isaac Sim 4.2 & VR #906

Draft · wants to merge 47 commits into base: og-develop

Commits (47)
4c9d1e1
Upgrade to isaac sim 4.2
hang-yin Sep 25, 2024
13fe889
Add version 4.2 to KIT_FILES
hang-yin Sep 25, 2024
36dee3b
Merge branch 'og-develop' of https://github.com/StanfordVL/OmniGibson…
hang-yin Oct 2, 2024
5c2d25a
Merge branch 'og-develop' of https://github.com/StanfordVL/OmniGibson…
hang-yin Oct 3, 2024
5cddc03
Fix ObjectsInFOVOfRobot test
hang-yin Oct 7, 2024
14051b2
Isaac 4 2 update
hang-yin Oct 7, 2024
c303a0b
Deprecate projection emitter
hang-yin Oct 7, 2024
ffdce57
Retrieve default shader input from shader node
hang-yin Oct 7, 2024
1e13d9e
Merge branch 'og-develop' into isaac_4_2
hang-yin Oct 7, 2024
aab3b3b
Unbreak ObjectsInFOVOfRobot test
hang-yin Oct 7, 2024
1e04111
Fix transform util
hang-yin Oct 7, 2024
8a7f3af
Fix material prim shader input bug
hang-yin Oct 8, 2024
c8cf5b9
Rename shader_input_names_by_type
hang-yin Oct 8, 2024
99f0999
Update robot pictures
hang-yin Oct 8, 2024
c210870
Mist effect initial implementation
hang-yin Oct 23, 2024
d03bfd4
VR scene tour demo without robot control
hang-yin Oct 31, 2024
4aeed8f
VR robot control with A1 demo
hang-yin Oct 31, 2024
97e85cc
Small infra changes for VR teleop
hang-yin Oct 31, 2024
552d87c
Teleop utils refactor, WIP
hang-yin Oct 31, 2024
90efba5
Update 4-2-0 kit file to include vr/xr extension
hang-yin Oct 31, 2024
2e6e63a
Merge branch 'isaac_4_2' of https://github.com/StanfordVL/OmniGibson …
hang-yin Nov 4, 2024
bc74a5c
WIP
hang-yin Nov 27, 2024
8f41145
Merge branch 'asset-conversion' of https://github.com/StanfordVL/Omni…
hang-yin Nov 27, 2024
bf3c2f9
Minor updates
hang-yin Dec 2, 2024
cfb5f30
Merge branch 'isaac_4_2' of https://github.com/StanfordVL/OmniGibson …
hang-yin Dec 2, 2024
f30bf9e
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 2, 2024
397a34d
Clean up vr utils
hang-yin Dec 3, 2024
0d808f2
Merge branch 'feat/np-opt' of https://github.com/StanfordVL/OmniGibso…
hang-yin Dec 4, 2024
54a351d
Allow headset to follow xformprim
hang-yin Dec 5, 2024
e73e1ab
Merge branch 'feat/np-opt' of https://github.com/StanfordVL/OmniGibso…
hang-yin Dec 5, 2024
d8d009e
Merge branch 'feat/np-opt' of https://github.com/StanfordVL/OmniGibso…
hang-yin Dec 5, 2024
20d4da2
Numpy optimization and data collection wrapper bug fixes
hang-yin Dec 9, 2024
4377a57
Allow VR free head orientation motion; optimize VR for data collection
hang-yin Dec 9, 2024
deccf82
Add view angle limits to VR
hang-yin Dec 12, 2024
40cc3f0
Merge branch 'og-develop' of https://github.com/StanfordVL/OmniGibson…
hang-yin Dec 17, 2024
f455421
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 17, 2024
ffa72bc
Merge branch 'feat/np-opt' of https://github.com/StanfordVL/OmniGibso…
hang-yin Dec 18, 2024
1c57085
Tiny bug fixes
hang-yin Dec 18, 2024
7d02e86
Merge branch 'isaac_4_2' of https://github.com/StanfordVL/OmniGibson …
hang-yin Dec 18, 2024
9615cd6
Write videos with data wrapper
hang-yin Dec 20, 2024
c10ed28
Add recording flag to data wrapper and VR support
hang-yin Dec 20, 2024
e935957
tmp fix
hang-yin Jan 19, 2025
f2c154a
Merge branch 'feat/data-wrapper-vr' of https://github.com/StanfordVL/…
hang-yin Jan 20, 2025
de6850c
TMP changes
hang-yin Jan 21, 2025
e944a68
quick change
hang-yin Jan 21, 2025
c73e7b8
tmp changes
hang-yin Jan 22, 2025
6a73adb
VR scene tour fix
hang-yin Feb 12, 2025
Files changed
Binary file modified docs/assets/robots/A1.png
Binary file modified docs/assets/robots/R1.png
Binary file modified docs/assets/robots/Stretch.png
109 changes: 93 additions & 16 deletions omnigibson/envs/data_wrapper.py
@@ -5,6 +5,7 @@
 from pathlib import Path

 import h5py
+import imageio
 import torch as th

 import omnigibson as og
@@ -60,6 +61,8 @@ def __init__(self, env, output_path, only_successes=True):
         self.add_metadata(group=data_grp, name="config", data=config)
         self.add_metadata(group=data_grp, name="scene_file", data=scene_file)

+        self.is_recording = True
+
         # Run super
         super().__init__(env=env)

@@ -86,14 +89,28 @@ def step(self, action):
         next_obs, reward, terminated, truncated, info = self.env.step(action)
         self.step_count += 1

+        self._record_step_trajectory(action, next_obs, reward, terminated, truncated, info)
+
+        return next_obs, reward, terminated, truncated, info
+
+    def _record_step_trajectory(self, action, obs, reward, terminated, truncated, info):
+        """
+        Record the current step data to the trajectory history
+
+        Args:
+            action (th.Tensor): action deployed resulting in @obs
+            obs (dict): state, i.e. observation
+            reward (float): reward, i.e. reward at this current timestep
+            terminated (bool): terminated, i.e. whether this episode ended due to a failure or success
+            truncated (bool): truncated, i.e. whether this episode ended due to a time limit etc.
+            info (dict): info, i.e. dictionary with any useful information
+        """
         # Aggregate step data
-        step_data = self._parse_step_data(action, next_obs, reward, terminated, truncated, info)
+        step_data = self._parse_step_data(action, obs, reward, terminated, truncated, info)

         # Update obs and traj history
         self.current_traj_history.append(step_data)
-        self.current_obs = next_obs
-
-        return next_obs, reward, terminated, truncated, info
+        self.current_obs = obs

     def _parse_step_data(self, action, obs, reward, terminated, truncated, info):
         """
@@ -188,13 +205,22 @@ def process_traj_to_hdf5(self, traj_data, traj_grp_name, nested_keys=("obs",)):

         return traj_grp

+    @property
+    def should_save_current_episode(self):
+        """
+        Returns:
+            bool: Whether the current episode should be saved or discarded
+        """
+        # Only save successful demos and if actually recording
+        success = self.env.task.success or not self.only_successes
+        return success and self.hdf5_file is not None
+
     def flush_current_traj(self):
         """
         Flush current trajectory data
         """
-        # Only save successful demos and if actually recording
-        success = self.env.task.success or not self.only_successes
-        if success and self.hdf5_file is not None:
+        if self.should_save_current_episode:
             traj_grp_name = f"demo_{self.traj_count}"
             traj_grp = self.process_traj_to_hdf5(self.current_traj_history, traj_grp_name, nested_keys=["obs"])
             self.traj_count += 1
@@ -252,14 +278,17 @@ class DataCollectionWrapper(DataWrapper):
     dataset!
     """

-    def __init__(self, env, output_path, viewport_camera_path="/World/viewer_camera", only_successes=True):
+    def __init__(
+        self, env, output_path, viewport_camera_path="/World/viewer_camera", only_successes=True, use_vr=False
+    ):
         """
         Args:
             env (Environment): The environment to wrap
             output_path (str): path to store hdf5 data file
             viewport_camera_path (str): prim path to the camera to use when rendering the main viewport during
                 data collection
             only_successes (bool): Whether to only save successful episodes
+            use_vr (bool): Whether to use VR headset for data collection
         """
         # Store additional variables needed for optimized data collection

@@ -270,6 +299,9 @@ def __init__(self, env, output_path, viewport_camera_path="/World/viewer_camera"
         # the given simulator step. See add_transition_info() for more info
         self.current_transitions = dict()

+        self._is_recording = True
+        self.use_vr = use_vr
+
         # Add callbacks on import / remove objects and systems
         og.sim.add_callback_on_system_init(
             name="data_collection", callback=lambda system: self.add_transition_info(obj=system, add=True)
@@ -290,6 +322,18 @@ def __init__(self, env, output_path, viewport_camera_path="/World/viewer_camera"
         # Configure the simulator to optimize for data collection
         self._optimize_sim_for_data_collection(viewport_camera_path=viewport_camera_path)

+    @property
+    def is_recording(self):
+        return self._is_recording
+
+    @is_recording.setter
+    def is_recording(self, value: bool):
+        self._is_recording = value
+
+    def _record_step_trajectory(self, action, obs, reward, terminated, truncated, info):
+        if self.is_recording:
+            super()._record_step_trajectory(action, obs, reward, terminated, truncated, info)
+
     def _optimize_sim_for_data_collection(self, viewport_camera_path):
         """
         Configures the simulator to optimize for data collection
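A hedged usage sketch (not from this PR) of the new recording toggle, assuming `env` is an already-constructed DataCollectionWrapper and `action` is a valid robot action:

```python
env.is_recording = False  # steps are still simulated, but not appended to the trajectory
env.step(action)          # e.g., the operator repositions in VR without polluting the demo

env.is_recording = True   # subsequent steps are logged again
env.step(action)

# The episode is persisted only if should_save_current_episode is True, which for
# DataCollectionWrapper now also requires is_recording to be enabled.
env.flush_current_traj()
```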
@@ -310,12 +354,14 @@ def _optimize_sim_for_data_collection(self, viewport_camera_path):
         # toggling these settings to be True -> False -> True
         # Only setting it to True once will actually freeze the GUI for some reason!
         if not gm.HEADLESS:
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", False)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", False)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True)
-            lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True)
+            # Async rendering does not work in VR mode
+            if not self.use_vr:
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", False)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", False)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRendering", True)
+                lazy.carb.settings.get_settings().set_bool("/app/asyncRenderingLowLatency", True)

         # Disable mouse grabbing since we're only using the UI passively
         lazy.carb.settings.get_settings().set_bool("/physics/mouseInteractionEnabled", False)
@@ -384,6 +430,11 @@ def flush_current_traj(self):
         self.max_state_size = 0
         self.current_transitions = dict()

+    @property
+    def should_save_current_episode(self):
+        # In addition to default conditions, we only save the current episode if we are actually recording
+        return super().should_save_current_episode and self.is_recording
+
     def add_transition_info(self, obj, add=True):
         """
         Adds transition info to the current sim step for specific object @obj.
@@ -470,6 +521,10 @@ def create_from_hdf5(
         if config["task"]["type"] == "BehaviorTask":
             config["task"]["online_object_sampling"] = False

+        # Because we're loading directly from the cached scene file, we need to disable any additional objects that are being added since
+        # they will already be cached in the original scene file
+        config["objects"] = []
+
         # Set observation modalities and update sensor config
         for robot_cfg in config["robots"]:
             robot_cfg["obs_modalities"] = robot_obs_modalities
@@ -523,15 +578,21 @@ def _parse_step_data(self, action, obs, reward, terminated, truncated, info):
         step_data["truncated"] = truncated
         return step_data

-    def playback_episode(self, episode_id, record=True):
+    def playback_episode(self, episode_id, record=True, video_path=None, video_writer=None):
         """
         Playback episode @episode_id, and optionally record observation data if @record is True

         Args:
             episode_id (int): Episode to playback. This should be a valid demo ID number from the inputted collected
                 data hdf5 file
             record (bool): Whether to record data during playback or not
+            video_path (None or str): If specified, path to write the playback video to
+            video_writer (None or imageio Writer): If specified, an imageio video writer to use for writing the video (can be specified in place of @video_path)
         """
+        using_external_writer = video_writer is not None
+        if video_writer is None and video_path is not None:
+            video_writer = imageio.get_writer(video_path, fps=30)
+
         data_grp = self.input_hdf5["data"]
         assert f"demo_{episode_id}" in data_grp, f"No valid episode with ID {episode_id} found!"
         traj_grp = data_grp[f"demo_{episode_id}"]
@@ -597,17 +658,33 @@ def playback_episode(self, episode_id, record=True):
             )
             self.current_traj_history.append(step_data)

+            # If writing video, save the current frame
+            if video_writer is not None:
+                video_writer.append_data(og.sim.viewer_camera.get_obs()[0]["rgb"][:, :, :3].numpy())
+
             self.step_count += 1

         if record:
             self.flush_current_traj()

-    def playback_dataset(self, record=True):
+        # If we weren't using an external writer but we're still writing a video, close the writer
+        if video_writer is not None and not using_external_writer:
+            video_writer.close()
+
+    def playback_dataset(self, record=True, video_path=None, video_writer=None):
         """
         Playback all episodes from the input HDF5 file, and optionally record observation data if @record is True

         Args:
             record (bool): Whether to record data during playback or not
+            video_path (None or str): If specified, path to write the playback video to
+            video_writer (None or imageio Writer): If specified, an imageio video writer to use for writing the video (can be specified in place of @video_path)
         """
+        if video_writer is None and video_path is not None:
+            video_writer = imageio.get_writer(video_path, fps=30)
         for episode_id in range(self.input_hdf5["data"].attrs["n_episodes"]):
-            self.playback_episode(episode_id=episode_id, record=record)
+            self.playback_episode(episode_id=episode_id, record=record, video_path=None, video_writer=video_writer)

+        # Close the video writer at the end if created
+        if video_writer is not None:
+            video_writer.close()
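A hedged sketch (not from this PR) of the two ways the new video arguments can be used during playback. The class name DataPlaybackWrapper and the create_from_hdf5 arguments below are assumptions for illustration; only playback_episode, playback_dataset, and robot_obs_modalities are visible in this diff:

```python
import imageio

from omnigibson.envs import DataPlaybackWrapper  # assumed export location

playback_env = DataPlaybackWrapper.create_from_hdf5(
    input_path="demo.hdf5",            # assumed argument: data collected by DataCollectionWrapper
    output_path="demo_playback.hdf5",  # assumed argument: where re-rendered data goes
    robot_obs_modalities=["rgb"],      # forwarded into each robot_cfg (see diff above)
)

# Option 1: pass a path; the wrapper creates the writer and closes it when the episode ends
playback_env.playback_episode(episode_id=0, record=True, video_path="demo_0.mp4")

# Option 2: share one writer across all episodes; playback_dataset closes it at the end
writer = imageio.get_writer("all_demos.mp4", fps=30)
playback_env.playback_dataset(record=True, video_writer=writer)
```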
2 changes: 1 addition & 1 deletion omnigibson/envs/env_base.py
@@ -717,7 +717,7 @@ def reset(self, get_obs=True, **kwargs):
                     log.error(f"Expected: {exp_obs[k]}")
                     log.error(f"Received: {real_obs[k]}")

-                raise ValueError("Observation space does not match returned observations!")
+                # raise ValueError("Observation space does not match returned observations!")

         return obs, {}
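Note that with the raise commented out, an observation-space mismatch during reset() is now only reported through the log.error calls above rather than aborting the reset.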

104 changes: 104 additions & 0 deletions omnigibson/examples/teleoperation/vr_robot_control_demo.py
@@ -0,0 +1,104 @@
"""
Example script for interacting with OmniGibson scenes with VR and BehaviorRobot.
"""

import torch as th

import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils.teleop_utils import OVXRSystem

gm.ENABLE_OBJECT_STATES = False
gm.ENABLE_TRANSITION_RULES = False
gm.ENABLE_FLATCACHE = True
gm.GUI_VIEWPORT_ONLY = True

# import torch._dynamo
# torch._dynamo.config.suppress_errors = True


def main():
"""
Spawn a BehaviorRobot in Rs_int and users can navigate around and interact with the scene using VR.
"""
# Create the config for generating the environment we want
scene_cfg = {"type": "InteractiveTraversableScene", "scene_model": "Rs_int"}
robot0_cfg = {
"type": "R1",
"obs_modalities": ["rgb"],
"controller_config": {
"arm_left": {
"name": "InverseKinematicsController",
"mode": "absolute_pose",
"command_input_limits": None,
"command_output_limits": None,
},
"arm_right": {
"name": "InverseKinematicsController",
"mode": "absolute_pose",
"command_input_limits": None,
"command_output_limits": None,
},
"gripper_left": {"name": "MultiFingerGripperController", "command_input_limits": "default"},
"gripper_right": {"name": "MultiFingerGripperController", "command_input_limits": "default"},
},
"action_normalize": False,
"reset_joint_pos": [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
-1.8000,
-0.8000,
0.0000,
-0.0068,
0.0059,
2.6054,
2.5988,
-1.4515,
-1.4478,
-0.0065,
0.0052,
1.5670,
-1.5635,
-1.1428,
1.1610,
0.0087,
0.0087,
0.0087,
0.0087,
],
}
cfg = dict(scene=scene_cfg, robots=[robot0_cfg])

# Create the environment
env = og.Environment(configs=cfg)
env.reset()
# start vrsys
vrsys = OVXRSystem(
robot=env.robots[0],
show_control_marker=True,
system="SteamVR",
eef_tracking_mode="controller",
align_anchor_to="camera",
# roll, pitch, yaw
view_angle_limits=[180, 30, 30],
)
vrsys.start()

for _ in range(3000):
# update the VR system
vrsys.update()
# get the action from the VR system and step the environment
env.step(vrsys.get_robot_teleop_action())

print("Cleaning up...")
vrsys.stop()
og.clear()


if __name__ == "__main__":
main()
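Putting the two halves of this PR together, a hedged sketch (not in this diff) of driving the updated DataCollectionWrapper from the same VR teleop loop. The import location and the attribute forwarding of env.robots through the wrapper are assumptions:

```python
import omnigibson as og
from omnigibson.envs import DataCollectionWrapper  # assumed export location
from omnigibson.utils.teleop_utils import OVXRSystem

# cfg: the same scene/robot config constructed in main() above
env = DataCollectionWrapper(
    env=og.Environment(configs=cfg),
    output_path="vr_demo.hdf5",
    only_successes=False,
    use_vr=True,  # skips the async-rendering toggle, which does not work in VR mode
)
vrsys = OVXRSystem(robot=env.robots[0], system="SteamVR", eef_tracking_mode="controller")
vrsys.start()
for _ in range(3000):
    vrsys.update()
    env.step(vrsys.get_robot_teleop_action())
vrsys.stop()
env.flush_current_traj()  # persisted only if should_save_current_episode is True
```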