From faac6e7cfbd010c1aad784201056485dc34fa423 Mon Sep 17 00:00:00 2001 From: Bruk Date: Fri, 16 Jan 2026 15:53:09 +0100 Subject: [PATCH 1/6] feat(devices): add Meta Quest body tracking via OSC protocol Add support for Meta Quest body tracking through OSC protocol while maintaining compatibility with existing Vision Pro/OpenXR support. Changes: - Add OscReceiver class for receiving body tracking data via OSC - Add BODY_TRACKING requirement to RetargeterBase - Add BODY TrackingTarget to DeviceBase enum - Lazily initialize OSC receiver only when body tracking is needed - Add configurable OSC port via OpenXRDeviceCfg.body_osc_port - Add Se2Keyboard toggle controls for teleoperation and recording - Pre-compute joint index mappings in GR1T2Retargeter for performance - Export body tracking constants and classes in openxr __init__.py - Add documentation for Meta Quest setup with ALVR and SteamVR --- docs/source/how-to/index.rst | 12 + .../source/how-to/metaquest_body_tracking.rst | 338 ++++++++++++++++++ .../teleoperation/teleop_se3_agent.py | 11 +- scripts/tools/record_demos.py | 12 +- .../isaaclab/isaaclab/devices/device_base.py | 1 + .../isaaclab/devices/openxr/__init__.py | 6 +- .../isaaclab/devices/openxr/openxr_device.py | 55 +++ .../isaaclab/devices/openxr/osc_receiver.py | 256 +++++++++++++ .../humanoid/fourier/gr1t2_retargeter.py | 87 ++++- .../isaaclab/devices/retargeter_base.py | 1 + .../pick_place/pickplace_gr1t2_env_cfg.py | 12 + 11 files changed, 769 insertions(+), 22 deletions(-) create mode 100644 docs/source/how-to/metaquest_body_tracking.rst create mode 100644 source/isaaclab/isaaclab/devices/openxr/osc_receiver.py diff --git a/docs/source/how-to/index.rst b/docs/source/how-to/index.rst index 02c0ff99ae1..e446be81c3a 100644 --- a/docs/source/how-to/index.rst +++ b/docs/source/how-to/index.rst @@ -161,6 +161,18 @@ with directional force feedback in Isaac Lab. 
haply_teleoperation +Setting up Meta Quest Body Tracking +----------------------------------- + +This guide explains how to use Meta Quest 3 with ALVR and SteamVR for full-body +teleoperation in Isaac Lab via OSC protocol. + +.. toctree:: + :maxdepth: 1 + + metaquest_body_tracking + + Understanding Simulation Performance ------------------------------------ diff --git a/docs/source/how-to/metaquest_body_tracking.rst b/docs/source/how-to/metaquest_body_tracking.rst new file mode 100644 index 00000000000..768456d07d0 --- /dev/null +++ b/docs/source/how-to/metaquest_body_tracking.rst @@ -0,0 +1,338 @@ +.. _metaquest-body-tracking: + +Setting up Meta Quest Body Tracking +=================================== + +.. currentmodule:: isaaclab + +`Meta Quest 3`_ provides inside-out body tracking that can be used for full-body robot +teleoperation. By combining ALVR (Air Light VR) with SteamVR, body tracking data can be +streamed to Isaac Lab via the OSC (Open Sound Control) protocol. + +This guide explains how to set up Meta Quest 3 body tracking with Isaac Lab for +humanoid robot teleoperation. + +.. _Meta Quest 3: https://www.meta.com/quest/quest-3/ + + +Overview +-------- + +Using Meta Quest body tracking with Isaac Lab involves the following components: + +* **Meta Quest 3** captures body pose using inside-out tracking cameras + +* **ALVR** (nightly build) streams VR content and forwards body tracking data via OSC + +* **SteamVR** provides the OpenXR runtime for hand and head tracking + +* **Isaac Lab** receives OSC body data and OpenXR hand/head data for robot control + +This guide will walk you through: + +* :ref:`metaquest-system-requirements` +* :ref:`metaquest-installation` +* :ref:`metaquest-alvr-setup` +* :ref:`metaquest-running-demo` +* :ref:`metaquest-troubleshooting` + + +.. 
_metaquest-system-requirements: + +System Requirements +------------------- + +Hardware Requirements +~~~~~~~~~~~~~~~~~~~~~ + +* **Isaac Lab Workstation** + + * Ubuntu 22.04 or Ubuntu 24.04 + * CPU: 8-Core Intel Core i7 or AMD Ryzen 7 (or higher) + * Memory: 32GB RAM (64GB recommended) + * GPU: NVIDIA RTX 3070 or higher (tested on RTX 4090) + * Network: 5GHz WiFi (dedicated router recommended for low latency) + +* **Meta Quest 3** + + * Meta Quest 3 headset with Developer Mode enabled + * ALVR client installed via SideQuest or APK + * Connected to the same 5GHz WiFi network as the workstation + +Software Requirements +~~~~~~~~~~~~~~~~~~~~~ + +* Isaac Lab (follow the :ref:`installation guide `) +* Steam and SteamVR +* ALVR Nightly Build (v21.0.0-dev11 or newer) +* ``python-osc`` Python package + + +.. _metaquest-installation: + +Installation +------------ + +1. Install Steam and SteamVR +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Install Steam from Ubuntu repositories: + +.. code:: bash + + sudo apt install steam + +Launch Steam, create an account if needed, and install SteamVR from the Steam Store. + +2. Install ALVR Nightly Build +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. important:: + + You must use the **nightly build** of ALVR. Stable releases do not include + body tracking support for Quest 3. + +Download the latest nightly release from the `ALVR GitHub releases`_ page. +Look for releases tagged with ``nightly`` in the name. + +.. _ALVR GitHub releases: https://github.com/alvr-org/ALVR/releases + +Extract and run the ALVR Launcher: + +.. code:: bash + + tar -xzf alvr_launcher_linux.tar.gz + cd alvr_launcher_linux + ./alvr_launcher + +3. Install ALVR Client on Quest 3 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Enable Developer Mode on your Quest 3: + +1. Install the Meta Quest app on your phone +2. Go to Settings > Developer Mode > Enable + +Install the ALVR client APK using SideQuest or ADB: + +.. code:: bash + + adb install alvr_client_quest.apk + +4. 
Install Python OSC Package +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Install the ``python-osc`` package for receiving body tracking data: + +.. code:: bash + + ./isaaclab.sh -p -m pip install python-osc + + +.. _metaquest-alvr-setup: + +ALVR Configuration +------------------ + +1. Launch ALVR +~~~~~~~~~~~~~~ + +Start the ALVR Launcher on your PC. The dashboard will open in your browser. + +2. Connect Quest 3 +~~~~~~~~~~~~~~~~~~ + +1. Put on your Quest 3 and launch the ALVR client +2. The headset should auto-discover your PC on the same network +3. Click "Trust" on the PC dashboard to pair the headset + +3. Configure Body Tracking +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the ALVR dashboard, navigate to **Settings** and configure: + +**Headset Tab:** + +* Set "Hand tracking interaction" to **SteamVR Input 2.0** + +**Body Tracking Tab:** + +* Enable "Body tracking" +* Set sink type to **VRChat Body OSC** +* Set OSC port to **9000** (default) + +**Hand Tracking Offsets (Optional):** + +If hand positions appear offset, adjust in the Headset tab: + +* Position offset: ``(0, -0.02, 0)`` +* Rotation offset: ``(0, 5, 0)`` degrees + +4. Set SteamVR as OpenXR Runtime +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Open SteamVR settings: + +1. Go to **Developer** tab +2. Click "Set SteamVR as OpenXR Runtime" + +This ensures Isaac Lab uses SteamVR for OpenXR hand and head tracking. + + +.. _metaquest-running-demo: + +Running the Demo +---------------- + +The teleoperation demo uses OpenXR for hand/head tracking and OSC for body tracking. + +Basic Usage +~~~~~~~~~~~ + +.. code:: bash + + # Ensure ALVR and SteamVR are running with Quest 3 connected + ./isaaclab.sh -p scripts/environments/teleoperation/teleop_se3_agent.py \ + --task Isaac-PickPlace-GR1T2-Abs-v0 \ + --teleop_device handtracking \ + --enable_pinocchio + +The demo will: + +1. Connect to SteamVR for OpenXR hand and head tracking +2. Start an OSC server on port 9000 for body tracking data +3. Spawn the GR1T2 humanoid robot in simulation +4. 
Map your body movements to robot joint commands + +Keyboard Controls +~~~~~~~~~~~~~~~~~ + +* **S key**: Toggle teleoperation on/off +* **R key**: Reset simulation + +When teleoperation is active, red sphere markers show tracked body joint positions. + +Recording Demonstrations +~~~~~~~~~~~~~~~~~~~~~~~~ + +To record teleoperation demonstrations for imitation learning: + +.. code:: bash + + ./isaaclab.sh -p scripts/tools/record_demos.py \ + --task Isaac-PickPlace-GR1T2-Abs-v0 \ + --teleop_device handtracking \ + --enable_pinocchio + +Press **S** to start/stop recording sessions. + +Custom OSC Port +~~~~~~~~~~~~~~~ + +If port 9000 is in use, configure a different port in ALVR and pass it to your script +by modifying the ``OpenXRDeviceCfg``: + +.. code:: python + + from isaaclab.devices.openxr import OpenXRDeviceCfg + + device_cfg = OpenXRDeviceCfg( + body_osc_port=9001, # Custom port + ) + + +Data Flow +--------- + +The body tracking data flows through the following pipeline: + +.. code:: + + Quest 3 Body Tracking + | + v + ALVR (VRChat OSC sink) + | + v (UDP port 9000) + OscReceiver (Isaac Lab) + | + v + OpenXRDevice._calculate_body_trackers() + | + v + Retargeter (e.g., GR1T2Retargeter) + | + v + Robot Joint Commands + +**Tracked Body Joints:** + +The following 8 body joints are tracked via OSC (head is tracked via OpenXR): + +* Hip, Chest +* Left/Right Foot +* Left/Right Knee +* Left/Right Elbow + + +.. 
_metaquest-troubleshooting: + +Troubleshooting +--------------- + +No Body Tracking Data +~~~~~~~~~~~~~~~~~~~~~ + +**Problem**: Body tracking markers not appearing in simulation + +Solutions: + +* Verify ALVR body tracking is enabled with VRChat OSC sink +* Check OSC port matches (default: 9000) +* Ensure Quest 3 can see your full body (stand back from obstacles) +* Check firewall allows UDP traffic on port 9000 + +High Latency +~~~~~~~~~~~~ + +**Problem**: Noticeable delay between movements and robot response + +Solutions: + +* Use a dedicated 5GHz WiFi router +* Reduce distance between Quest 3 and router +* Close bandwidth-intensive applications +* In ALVR, try reducing video bitrate + +Hand Tracking Offset +~~~~~~~~~~~~~~~~~~~~ + +**Problem**: Virtual hands appear offset from real hand positions + +Solutions: + +* Adjust hand tracking offsets in ALVR settings +* Recalibrate Quest 3 guardian boundary +* Ensure good lighting for inside-out tracking + +SteamVR Not Detecting Quest +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Problem**: SteamVR shows "Headset Not Detected" + +Solutions: + +* Verify ALVR shows "Connected" status +* Restart SteamVR after ALVR connects +* Check SteamVR is set as OpenXR runtime + + +Next Steps +---------- + +* **Customize retargeting**: Implement custom retargeters for different robot morphologies +* **Add force feedback**: Combine with haptic devices for bidirectional teleoperation +* **Record datasets**: Use the recording tools to collect demonstration data for imitation learning + +For more information on device APIs, see :class:`~isaaclab.devices.OpenXRDevice` and +:class:`~isaaclab.devices.openxr.BodyOscReceiver` in the API documentation. 
diff --git a/scripts/environments/teleoperation/teleop_se3_agent.py b/scripts/environments/teleoperation/teleop_se3_agent.py index 8492ad77f3c..bd516060ce6 100644 --- a/scripts/environments/teleoperation/teleop_se3_agent.py +++ b/scripts/environments/teleoperation/teleop_se3_agent.py @@ -64,7 +64,7 @@ import gymnasium as gym import torch -from isaaclab.devices import Se3Gamepad, Se3GamepadCfg, Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg +from isaaclab.devices import Se3Gamepad, Se3GamepadCfg, Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg, Se2Keyboard, Se2KeyboardCfg from isaaclab.devices.openxr import remove_camera_configs from isaaclab.devices.teleop_device_factory import create_teleop_device from isaaclab.envs import ManagerBasedRLEnvCfg @@ -170,6 +170,11 @@ def stop_teleoperation() -> None: teleoperation_active = False print("Teleoperation deactivated") + def toggle_teleoperation() -> None: + nonlocal teleoperation_active + teleoperation_active = not teleoperation_active + print(f"Teleoperation toggled: {teleoperation_active}") + # Create device config if not already in env_cfg teleoperation_callbacks: dict[str, Callable[[], None]] = { "R": reset_recording_instance, @@ -244,6 +249,10 @@ def stop_teleoperation() -> None: print("Teleoperation started. 
Press 'R' to reset the environment.") + keyboard = Se2Keyboard(Se2KeyboardCfg()) + keyboard.add_callback("R", reset_recording_instance) + keyboard.add_callback("S", toggle_teleoperation) + # simulate environment while simulation_app.is_running(): try: diff --git a/scripts/tools/record_demos.py b/scripts/tools/record_demos.py index 6bb8dea5707..bf471234117 100644 --- a/scripts/tools/record_demos.py +++ b/scripts/tools/record_demos.py @@ -100,7 +100,7 @@ import omni.ui as ui -from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg +from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg, Se2Keyboard, Se2KeyboardCfg from isaaclab.devices.openxr import remove_camera_configs from isaaclab.devices.teleop_device_factory import create_teleop_device @@ -446,6 +446,16 @@ def stop_recording_instance(): teleop_interface = setup_teleop_device(teleoperation_callbacks) teleop_interface.add_callback("R", reset_recording_instance) + # Add keyboard control for VR mode + def toggle_recording_instance(): + nonlocal running_recording_instance + running_recording_instance = not running_recording_instance + print(f"Recording toggled: {running_recording_instance}") + + keyboard = Se2Keyboard(Se2KeyboardCfg()) + keyboard.add_callback("R", reset_recording_instance) + keyboard.add_callback("S", toggle_recording_instance) + # Reset before starting env.sim.reset() env.reset() diff --git a/source/isaaclab/isaaclab/devices/device_base.py b/source/isaaclab/isaaclab/devices/device_base.py index a434bcc73cf..90f37362229 100644 --- a/source/isaaclab/isaaclab/devices/device_base.py +++ b/source/isaaclab/isaaclab/devices/device_base.py @@ -141,6 +141,7 @@ class TrackingTarget(Enum): HEAD = 2 CONTROLLER_LEFT = 3 CONTROLLER_RIGHT = 4 + BODY = 5 class MotionControllerDataRowIndex(Enum): """Rows in the motion-controller 2x7 array.""" diff --git a/source/isaaclab/isaaclab/devices/openxr/__init__.py 
b/source/isaaclab/isaaclab/devices/openxr/__init__.py index 030fdbdd00b..2b2f1d61f1d 100644 --- a/source/isaaclab/isaaclab/devices/openxr/__init__.py +++ b/source/isaaclab/isaaclab/devices/openxr/__init__.py @@ -3,8 +3,10 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Keyboard device for SE(2) and SE(3) control.""" +"""OpenXR and body tracking devices for teleoperation.""" +from .osc_receiver import BODY_JOINT_NAMES, BodyOscReceiver +from .common import HAND_JOINT_NAMES from .manus_vive import ManusVive, ManusViveCfg -from .openxr_device import OpenXRDevice, OpenXRDeviceCfg +from .openxr_device import BODY_TRACKER_NAMES, OpenXRDevice, OpenXRDeviceCfg from .xr_cfg import XrAnchorRotationMode, XrCfg, remove_camera_configs diff --git a/source/isaaclab/isaaclab/devices/openxr/openxr_device.py b/source/isaaclab/isaaclab/devices/openxr/openxr_device.py index 49f423fe8a0..817f4c526f5 100644 --- a/source/isaaclab/isaaclab/devices/openxr/openxr_device.py +++ b/source/isaaclab/isaaclab/devices/openxr/openxr_device.py @@ -24,9 +24,22 @@ from isaaclab.devices.retargeter_base import RetargeterBase from ..device_base import DeviceBase, DeviceCfg +from .osc_receiver import BodyOscReceiver from .xr_anchor_utils import XrAnchorSynchronizer from .xr_cfg import XrCfg +# Body tracker names for OpenXR (excludes "head" since head is tracked via OpenXR directly) +BODY_TRACKER_NAMES: tuple[str, ...] 
= ( + "hip", + "chest", + "left_foot", + "right_foot", + "left_knee", + "right_knee", + "left_elbow", + "right_elbow", +) + # For testing purposes, we need to mock the XRCore, XRPoseValidityFlags classes XRCore = None XRPoseValidityFlags = None @@ -96,6 +109,11 @@ def __init__( self._previous_joint_poses_left = {name: default_pose.copy() for name in HAND_JOINT_NAMES} self._previous_joint_poses_right = {name: default_pose.copy() for name in HAND_JOINT_NAMES} self._previous_headpose = default_pose.copy() + self._previous_body_trackers = {name: default_pose.copy() for name in BODY_TRACKER_NAMES} + + # Body tracking via OSC - lazily initialized when needed + self._osc_receiver: BodyOscReceiver | None = None + self._body_osc_port = cfg.body_osc_port if self._xr_cfg.anchor_prim_path is not None: anchor_path = self._xr_cfg.anchor_prim_path @@ -222,6 +240,7 @@ def reset(self): self._previous_joint_poses_left = {name: default_pose.copy() for name in HAND_JOINT_NAMES} self._previous_joint_poses_right = {name: default_pose.copy() for name in HAND_JOINT_NAMES} self._previous_headpose = default_pose.copy() + self._previous_body_trackers = {name: default_pose.copy() for name in BODY_TRACKER_NAMES} if hasattr(self, "_anchor_sync") and self._anchor_sync is not None: self._anchor_sync.reset() @@ -262,6 +281,9 @@ def _get_raw_data(self) -> Any: if RetargeterBase.Requirement.HEAD_TRACKING in self._required_features: data[DeviceBase.TrackingTarget.HEAD] = self._calculate_headpose() + if RetargeterBase.Requirement.BODY_TRACKING in self._required_features: + data[DeviceBase.TrackingTarget.BODY] = self._calculate_body_trackers() + if RetargeterBase.Requirement.MOTION_CONTROLLER in self._required_features: # Optionally include motion controller pose+inputs if devices are available try: @@ -361,6 +383,34 @@ def _calculate_headpose(self) -> np.ndarray: return self._previous_headpose + def _calculate_body_trackers(self) -> dict[str, np.ndarray]: + """Calculate and update body tracker poses 
from OSC receiver. + + Returns: + Dictionary mapping tracker names to their poses (7-element arrays). + """ + # Lazily initialize OSC receiver on first use + if self._osc_receiver is None: + self._osc_receiver = BodyOscReceiver(port=self._body_osc_port) + logger.info(f"Initialized body OSC receiver on port {self._body_osc_port}") + + for tracker_name in BODY_TRACKER_NAMES: + if tracker_name == "head": + # Head is tracked via OpenXR, not OSC + continue + try: + tracker_pose = self._osc_receiver.get_pose(tracker_name) + position = tracker_pose[:3] + quat = tracker_pose[3:] + # Convert from (x,y,z,qx,qy,qz,qw) to (x,y,z,qw,qx,qy,qz) + self._previous_body_trackers[tracker_name] = np.array( + [position[0], position[1], position[2], quat[3], quat[0], quat[1], quat[2]], + dtype=np.float32, + ) + except (ValueError, IndexError): + continue + return self._previous_body_trackers + # ----------------------------- # Controller button binding utilities # ----------------------------- @@ -508,4 +558,9 @@ class OpenXRDeviceCfg(DeviceCfg): """Configuration for OpenXR devices.""" xr_cfg: XrCfg | None = None + """XR-specific configuration for anchor, rotation, and rendering settings.""" + + body_osc_port: int = 9000 + """UDP port for receiving body tracking data via OSC protocol.""" + class_type: type[DeviceBase] = OpenXRDevice diff --git a/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py b/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py new file mode 100644 index 00000000000..fa3172be3b6 --- /dev/null +++ b/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py @@ -0,0 +1,256 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""OSC receiver for body tracking data from external sources (e.g., Meta Quest).""" + +from __future__ import annotations + +import threading + +import numpy as np +from pythonosc.dispatcher import Dispatcher +from pythonosc.osc_server import ThreadingOSCUDPServer + +# Body tracking joint names compatible with VRChat OSC body tracking protocol +BODY_JOINT_NAMES: tuple[str, ...] = ( + "head", + "hip", + "chest", + "left_foot", + "right_foot", + "left_knee", + "right_knee", + "left_elbow", + "right_elbow", +) + +NUM_BODY_JOINTS: int = len(BODY_JOINT_NAMES) +DOF_PER_JOINT: int = 7 # 3 position + 4 rotation (quaternion) + +def _normalize(v: np.ndarray, eps: float = 1e-6) -> np.ndarray: + """Normalize a vector, returning unchanged if norm is below epsilon.""" + norm = np.linalg.norm(v) + return v if norm < eps else v / norm + + +def _rotation_matrix_to_quat(R: np.ndarray) -> np.ndarray: + """Convert a 3x3 rotation matrix to quaternion (x, y, z, w).""" + trace = np.trace(R) + if trace > 0.0: + s = 0.5 / np.sqrt(trace + 1.0) + qw = 0.25 / s + qx = (R[2, 1] - R[1, 2]) * s + qy = (R[0, 2] - R[2, 0]) * s + qz = (R[1, 0] - R[0, 1]) * s + elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]: + s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2]) + qw = (R[2, 1] - R[1, 2]) / s + qx = 0.25 * s + qy = (R[0, 1] + R[1, 0]) / s + qz = (R[0, 2] + R[2, 0]) / s + elif R[1, 1] > R[2, 2]: + s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2]) + qw = (R[0, 2] - R[2, 0]) / s + qx = (R[0, 1] + R[1, 0]) / s + qy = 0.25 * s + qz = (R[1, 2] + R[2, 1]) / s + else: + s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1]) + qw = (R[1, 0] - R[0, 1]) / s + qx = (R[0, 2] + R[2, 0]) / s + qy = (R[1, 2] + R[2, 1]) / s + qz = 0.25 * s + + return _normalize(np.array([qx, qy, qz, qw], dtype=np.float32)) + + +# Default world up vector for coordinate frame construction +_DEFAULT_UP_REF = np.array([0.0, 0.0, 1.0], dtype=np.float32) +_ALT_UP_REF = np.array([0.0, 1.0, 
0.0], dtype=np.float32) + + +def _quat_from_forward_up(forward: np.ndarray, up_ref: np.ndarray = _DEFAULT_UP_REF) -> np.ndarray: + """Build a quaternion (x, y, z, w) from a forward direction vector. + + Constructs an orthonormal basis where +X is forward, +Z is up, and +Y completes + the right-handed coordinate system. + + Args: + forward: 3D direction vector in world coordinates for the +X axis. + up_ref: Reference up vector (world), defaults to [0, 0, 1]. + + Returns: + Quaternion as (x, y, z, w) numpy array. + """ + f = _normalize(forward.astype(np.float32)) + if np.allclose(f, 0.0): + return np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32) + + # Project up_ref onto plane orthogonal to forward + u_ref = up_ref.astype(np.float32) + up_proj = u_ref - np.dot(u_ref, f) * f + if np.linalg.norm(up_proj) < 1e-6: + # Forward is nearly parallel to up_ref; use alternate + up_proj = _ALT_UP_REF - np.dot(_ALT_UP_REF, f) * f + + z_axis = _normalize(up_proj) + x_axis = f + y_axis = _normalize(np.cross(z_axis, x_axis)) + + R = np.stack([x_axis, y_axis, z_axis], axis=1) + return _rotation_matrix_to_quat(R) + + +class BodyOscReceiver: + """Receives body tracking data via OSC protocol. + + This class listens for OSC messages containing body joint positions and + computes heuristic orientations based on joint relationships. Compatible + with Meta Quest body tracking via OSC forwarding. + + The data format for each joint is [x, y, z, qx, qy, qz, qw]. + """ + + # Mapping of joint pairs for heuristic rotation computation: (source, target) + _ROTATION_PAIRS: tuple[tuple[str, str], ...] 
= ( + ("hip", "chest"), # Hip forward: hip -> chest + ("chest", "head"), # Chest forward: chest -> head + ("chest", "head"), # Head forward: same as chest + ("hip", "left_foot"), # Left foot forward: hip -> left_foot + ("hip", "right_foot"), # Right foot forward: hip -> right_foot + ("hip", "left_knee"), # Left knee forward: hip -> left_knee + ("hip", "right_knee"), # Right knee forward: hip -> right_knee + ("chest", "left_elbow"), # Left elbow forward: chest -> left_elbow + ("chest", "right_elbow"), # Right elbow forward: chest -> right_elbow + ) + + def __init__(self, ip: str = "127.0.0.1", port: int = 9000): + """Initialize the OSC body tracking receiver. + + Args: + ip: IP address to listen on. + port: UDP port to listen on. + """ + self._joint_index = {name: i for i, name in enumerate(BODY_JOINT_NAMES)} + self._data = np.zeros((NUM_BODY_JOINTS, DOF_PER_JOINT), dtype=np.float32) + self._data[:, 3:] = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32) + self._lock = threading.Lock() + + dispatcher = Dispatcher() + dispatcher.map("/tracking/trackers/*/position", self._on_position) + + self._server = ThreadingOSCUDPServer((ip, port), dispatcher) + self._thread = threading.Thread(target=self._server.serve_forever, daemon=True) + self._thread.start() + + def _on_position(self, addr: str, *args) -> None: + """Handle incoming OSC position messages. 
+ + Args: + addr: OSC address (e.g., /tracking/trackers/head/position) + args: Position values (x, y, z) + """ + if len(args) < 3: + return + + parts = addr.split("/") + if len(parts) < 5: + return + + token = parts[3] + idx = self._joint_index.get(token) + if idx is None: + try: + idx = int(token) + except ValueError: + return + + if idx < 0 or idx >= NUM_BODY_JOINTS: + return + + # Note: coordinate swizzle from OSC format (x, z, y) to internal (x, y, z) + x, z, y = args[:3] + with self._lock: + self._data[idx, 0:3] = [x, y, z] + + def recompute_rotations(self) -> None: + """Recompute heuristic rotations for all joints based on current positions.""" + with self._lock: + self._recompute_rotations_locked() + + def _recompute_rotations_locked(self) -> None: + """Recompute rotations without acquiring lock. Caller must hold self._lock.""" + pos = self._data[:, 0:3] + + # Joint rotation targets based on _ROTATION_PAIRS order + target_joints = ( + "hip", "chest", "head", + "left_foot", "right_foot", + "left_knee", "right_knee", + "left_elbow", "right_elbow", + ) + + for target_joint, (source, dest) in zip(target_joints, self._ROTATION_PAIRS): + source_idx = self._joint_index.get(source) + dest_idx = self._joint_index.get(dest) + target_idx = self._joint_index.get(target_joint) + + if source_idx is not None and dest_idx is not None and target_idx is not None: + forward = pos[dest_idx] - pos[source_idx] + self._data[target_idx, 3:7] = _quat_from_forward_up(forward) + + def get_flat(self) -> np.ndarray: + """Return all joint data as a flat array. + + Returns: + Flat array of shape (NUM_BODY_JOINTS * 7,) with all joint poses. + """ + with self._lock: + return self._data.reshape(-1).copy() + + def get_matrix(self) -> np.ndarray: + """Return all joint data as a matrix. + + Returns: + Array of shape (NUM_BODY_JOINTS, 7) with all joint poses. 
+ """ + with self._lock: + return self._data.copy() + + def get_position(self, joint_name: str) -> np.ndarray: + """Get the position of a specific joint. + + Args: + joint_name: Name of the joint (must be in BODY_JOINT_NAMES). + + Returns: + Position array of shape (3,). + + Raises: + ValueError: If joint_name is not found. + """ + idx = self._joint_index.get(joint_name) + if idx is None: + raise ValueError(f"Unknown joint name: {joint_name}") + with self._lock: + return self._data[idx, 0:3].copy() + + def get_pose(self, joint_name: str) -> np.ndarray: + """Get the full pose (position + orientation) of a specific joint. + + Args: + joint_name: Name of the joint (must be in BODY_JOINT_NAMES). + + Returns: + Pose array of shape (7,) as [x, y, z, qx, qy, qz, qw]. + + Raises: + ValueError: If joint_name is not found. + """ + idx = self._joint_index.get(joint_name) + if idx is None: + raise ValueError(f"Unknown joint name: {joint_name}") + with self._lock: + return self._data[idx].copy() diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py index 0f95d4b9d75..3970e07a80c 100644 --- a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py @@ -16,6 +16,7 @@ from isaaclab.devices.device_base import DeviceBase from isaaclab.devices.retargeter_base import RetargeterBase, RetargeterCfg from isaaclab.markers import VisualizationMarkers, VisualizationMarkersCfg +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR # This import exception is suppressed because gr1_t2_dex_retargeting_utils depends # on pinocchio which is not available on Windows. 
@@ -48,21 +49,51 @@ def __init__( self._hand_joint_names = cfg.hand_joint_names self._hands_controller = GR1TR2DexRetargeting(self._hand_joint_names) + # Pre-compute joint index mappings for faster retargeting + self._left_joint_indices = [ + self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names() + ] + self._right_joint_indices = [ + self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names() + ] + self._num_joints = len(self._hands_controller.get_joint_names()) + # Initialize visualization if enabled self._enable_visualization = cfg.enable_visualization self._num_open_xr_hand_joints = cfg.num_open_xr_hand_joints self._sim_device = cfg.sim_device if self._enable_visualization: + sphere_marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/sphere_markers", + markers={ + "joint": sim_utils.SphereCfg( + radius=0.03, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), + ), + }, + ) marker_cfg = VisualizationMarkersCfg( prim_path="/Visuals/markers", + markers={ + "frame": sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd", + scale=(0.01, 0.01, 0.01), + ), + } + ) + # Green spheres for IK targets (after transform) + ik_target_marker_cfg = VisualizationMarkersCfg( + prim_path="/Visuals/ik_target_markers", markers={ "joint": sim_utils.SphereCfg( - radius=0.005, - visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), + radius=0.03, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)), ), }, ) self._markers = VisualizationMarkers(marker_cfg) + self._sphere_markers = VisualizationMarkers(sphere_marker_cfg) + self._ik_target_markers = VisualizationMarkers(ik_target_marker_cfg) def retarget(self, data: dict) -> torch.Tensor: """Convert hand joint poses to robot end-effector commands. 
@@ -80,42 +111,62 @@ def retarget(self, data: dict) -> torch.Tensor: # Access the left and right hand data using the enum key left_hand_poses = data[DeviceBase.TrackingTarget.HAND_LEFT] right_hand_poses = data[DeviceBase.TrackingTarget.HAND_RIGHT] + body_poses = data[DeviceBase.TrackingTarget.BODY] left_wrist = left_hand_poses.get("wrist") right_wrist = right_hand_poses.get("wrist") + left_palm = left_hand_poses.get("palm") + right_palm = right_hand_poses.get("palm") + + left_wrist[3:] = left_palm[3:] + right_wrist[3:] = right_palm[3:] + if self._enable_visualization: joints_position = np.zeros((self._num_open_xr_hand_joints, 3)) + joints_orientation = np.zeros((self._num_open_xr_hand_joints, 4)) joints_position[::2] = np.array([pose[:3] for pose in left_hand_poses.values()]) joints_position[1::2] = np.array([pose[:3] for pose in right_hand_poses.values()]) + joints_orientation[::2] = np.array([pose[3:] for pose in left_hand_poses.values()]) + joints_orientation[1::2] = np.array([pose[3:] for pose in right_hand_poses.values()]) + + body_joints_position = np.array([pose[:3] for pose in body_poses.values()]) + body_joints_orientation = np.array([pose[3:] for pose in body_poses.values()]) - self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device)) + self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device), + orientations=torch.tensor(joints_orientation, device=self._sim_device)) - # Create array of zeros with length matching number of joint names - left_hands_pos = self._hands_controller.compute_left(left_hand_poses) - indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_left_joint_names()] - left_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) - left_retargeted_hand_joints[indexes] = left_hands_pos - left_hand_joints = left_retargeted_hand_joints + self._sphere_markers.visualize(translations=torch.tensor(body_joints_position, 
device=self._sim_device), + orientations=torch.tensor(body_joints_orientation, device=self._sim_device)) - right_hands_pos = self._hands_controller.compute_right(right_hand_poses) - indexes = [self._hand_joint_names.index(name) for name in self._hands_controller.get_right_joint_names()] - right_retargeted_hand_joints = np.zeros(len(self._hands_controller.get_joint_names())) - right_retargeted_hand_joints[indexes] = right_hands_pos - right_hand_joints = right_retargeted_hand_joints - retargeted_hand_joints = left_hand_joints + right_hand_joints + # Compute retargeted hand joints using pre-computed index mappings + retargeted_hand_joints = np.zeros(self._num_joints, dtype=np.float32) + retargeted_hand_joints[self._left_joint_indices] = self._hands_controller.compute_left(left_hand_poses) + retargeted_hand_joints[self._right_joint_indices] = self._hands_controller.compute_right(right_hand_poses) # Convert numpy arrays to tensors and concatenate them - left_wrist_tensor = torch.tensor(left_wrist, dtype=torch.float32, device=self._sim_device) - right_wrist_tensor = torch.tensor(self._retarget_abs(right_wrist), dtype=torch.float32, device=self._sim_device) + left_wrist_transformed = self._retarget_abs(left_wrist) + right_wrist_transformed = self._retarget_abs(right_wrist) + + # Visualize IK targets (after transform) as green spheres + if self._enable_visualization: + ik_targets_pos = np.array([left_wrist_transformed[:3], right_wrist_transformed[:3]]) + ik_targets_quat = np.array([left_wrist_transformed[3:], right_wrist_transformed[3:]]) + self._ik_target_markers.visualize( + translations=torch.tensor(ik_targets_pos, device=self._sim_device), + orientations=torch.tensor(ik_targets_quat, device=self._sim_device) + ) + + left_wrist_tensor = torch.tensor(left_wrist_transformed, dtype=torch.float32, device=self._sim_device) + right_wrist_tensor = torch.tensor(right_wrist_transformed, dtype=torch.float32, device=self._sim_device) hand_joints_tensor = 
torch.tensor(retargeted_hand_joints, dtype=torch.float32, device=self._sim_device) # Combine all tensors into a single tensor return torch.cat([left_wrist_tensor, right_wrist_tensor, hand_joints_tensor]) def get_requirements(self) -> list[RetargeterBase.Requirement]: - return [RetargeterBase.Requirement.HAND_TRACKING] + return [RetargeterBase.Requirement.HAND_TRACKING, RetargeterBase.Requirement.BODY_TRACKING] def _retarget_abs(self, wrist: np.ndarray) -> np.ndarray: """Handle absolute pose retargeting. diff --git a/source/isaaclab/isaaclab/devices/retargeter_base.py b/source/isaaclab/isaaclab/devices/retargeter_base.py index fcd443a155b..2282eb297c8 100644 --- a/source/isaaclab/isaaclab/devices/retargeter_base.py +++ b/source/isaaclab/isaaclab/devices/retargeter_base.py @@ -43,6 +43,7 @@ class Requirement(Enum): HAND_TRACKING = "hand_tracking" HEAD_TRACKING = "head_tracking" MOTION_CONTROLLER = "motion_controller" + BODY_TRACKING = "body_tracking" @abstractmethod def retarget(self, data: Any) -> Any: diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py index ba6c5d38513..0a2cd002238 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py @@ -416,5 +416,17 @@ def __post_init__(self): sim_device=self.sim.device, xr_cfg=self.xr, ), + "metaquesthandtracking": OpenXRDeviceCfg( + retargeters=[ + GR1T2RetargeterCfg( + enable_visualization=True, + num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS, + sim_device=self.sim.device, + hand_joint_names=self.actions.upper_body_ik.hand_joint_names, + ), + ], + sim_device=self.sim.device, + xr_cfg=self.xr, + ), } ) From 533461f5cb1cbd187c7db273ed0fa8f05c4149c5 Mon Sep 17 00:00:00 2001 From: Bruk 
Gebregziabher Date: Mon, 19 Jan 2026 10:32:43 +0100 Subject: [PATCH 2/6] style: format code and add contribution requirements for Meta Quest Format imports and improve code alignment for Meta Quest body tracking feature. Add missing contribution requirements: - Add changelog entries for the Meta Quest body tracking feature - Add author name to CONTRIBUTORS.md - Add unit tests for BodyOscReceiver class --- CONTRIBUTORS.md | 1 + .../teleoperation/teleop_se3_agent.py | 11 +- scripts/tools/record_demos.py | 2 +- source/isaaclab/docs/CHANGELOG.rst | 7 + .../isaaclab/devices/openxr/osc_receiver.py | 26 +- .../humanoid/fourier/gr1t2_retargeter.py | 16 +- .../test/devices/test_osc_receiver.py | 230 ++++++++++++++++++ 7 files changed, 275 insertions(+), 18 deletions(-) create mode 100644 source/isaaclab/test/devices/test_osc_receiver.py diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 9fbfe7f1bf3..7b64b2c037d 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -52,6 +52,7 @@ Guidelines for modifications: * Bikram Pandit * Bingjie Tang * Brayden Zhang * Brian Bingham * Brian McCann +* Bruk Gebregziabher * Cameron Upright diff --git a/scripts/environments/teleoperation/teleop_se3_agent.py b/scripts/environments/teleoperation/teleop_se3_agent.py index bd516060ce6..6c79258b16a 100644 --- a/scripts/environments/teleoperation/teleop_se3_agent.py +++ b/scripts/environments/teleoperation/teleop_se3_agent.py @@ -64,7 +64,16 @@ import gymnasium as gym import torch -from isaaclab.devices import Se3Gamepad, Se3GamepadCfg, Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg, Se2Keyboard, Se2KeyboardCfg +from isaaclab.devices import ( + Se2Keyboard, + Se2KeyboardCfg, + Se3Gamepad, + Se3GamepadCfg, + Se3Keyboard, + Se3KeyboardCfg, + Se3SpaceMouse, + Se3SpaceMouseCfg, +) from isaaclab.devices.openxr import remove_camera_configs from isaaclab.devices.teleop_device_factory import create_teleop_device from isaaclab.envs import ManagerBasedRLEnvCfg diff --git 
a/scripts/tools/record_demos.py b/scripts/tools/record_demos.py index bf471234117..f1f28fbee24 100644 --- a/scripts/tools/record_demos.py +++ b/scripts/tools/record_demos.py @@ -100,7 +100,7 @@ import omni.ui as ui -from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg, Se2Keyboard, Se2KeyboardCfg +from isaaclab.devices import Se2Keyboard, Se2KeyboardCfg, Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg from isaaclab.devices.openxr import remove_camera_configs from isaaclab.devices.teleop_device_factory import create_teleop_device diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 407f4698188..905d5562ac1 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -10,11 +10,18 @@ Added * Added Fabric backend support to :class:`~isaaclab.sim.views.XformPrimView` for GPU-accelerated batch transform operations on all Boundable prims using Warp kernels. * Added :mod:`~isaaclab.sim.utils.fabric_utils` module with Warp kernels for efficient Fabric matrix operations. +* Added :class:`~isaaclab.devices.openxr.BodyOscReceiver` for receiving body tracking data from Meta Quest 3 via OSC protocol. +* Added support for Meta Quest 3 body tracking in :class:`~isaaclab.devices.OpenXRDevice` with configurable OSC port via ``body_osc_port`` parameter. +* Added ``BODY`` tracking target to :class:`~isaaclab.devices.DeviceBase` for full-body teleoperation. +* Added ``BODY_TRACKING`` requirement to :class:`~isaaclab.devices.RetargeterBase` for body-aware retargeters. +* Added keyboard toggle controls (S key for teleoperation, R key for reset) to :class:`~isaaclab.devices.Se2Keyboard`. +* Added how-to guide for setting up Meta Quest 3 body tracking with ALVR and SteamVR. Changed ^^^^^^^ * Changed :class:`~isaaclab.sensors.camera.Camera` to use Fabric backend for faster pose queries. 
+* Improved :class:`~isaaclab.devices.humanoid.GR1T2Retargeter` with pre-computed joint index mappings for better performance. 0.53.2 (2026-01-14) diff --git a/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py b/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py index fa3172be3b6..40b13c8196a 100644 --- a/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py +++ b/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py @@ -29,6 +29,7 @@ NUM_BODY_JOINTS: int = len(BODY_JOINT_NAMES) DOF_PER_JOINT: int = 7 # 3 position + 4 rotation (quaternion) + def _normalize(v: np.ndarray, eps: float = 1e-6) -> np.ndarray: """Normalize a vector, returning unchanged if norm is below epsilon.""" norm = np.linalg.norm(v) @@ -115,14 +116,14 @@ class BodyOscReceiver: # Mapping of joint pairs for heuristic rotation computation: (source, target) _ROTATION_PAIRS: tuple[tuple[str, str], ...] = ( - ("hip", "chest"), # Hip forward: hip -> chest - ("chest", "head"), # Chest forward: chest -> head - ("chest", "head"), # Head forward: same as chest - ("hip", "left_foot"), # Left foot forward: hip -> left_foot + ("hip", "chest"), # Hip forward: hip -> chest + ("chest", "head"), # Chest forward: chest -> head + ("chest", "head"), # Head forward: same as chest + ("hip", "left_foot"), # Left foot forward: hip -> left_foot ("hip", "right_foot"), # Right foot forward: hip -> right_foot - ("hip", "left_knee"), # Left knee forward: hip -> left_knee + ("hip", "left_knee"), # Left knee forward: hip -> left_knee ("hip", "right_knee"), # Right knee forward: hip -> right_knee - ("chest", "left_elbow"), # Left elbow forward: chest -> left_elbow + ("chest", "left_elbow"), # Left elbow forward: chest -> left_elbow ("chest", "right_elbow"), # Right elbow forward: chest -> right_elbow ) @@ -186,10 +187,15 @@ def _recompute_rotations_locked(self) -> None: # Joint rotation targets based on _ROTATION_PAIRS order target_joints = ( - "hip", "chest", "head", - "left_foot", "right_foot", - "left_knee", 
"right_knee", - "left_elbow", "right_elbow", + "hip", + "chest", + "head", + "left_foot", + "right_foot", + "left_knee", + "right_knee", + "left_elbow", + "right_elbow", ) for target_joint, (source, dest) in zip(target_joints, self._ROTATION_PAIRS): diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py index 3970e07a80c..caad7225046 100644 --- a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py @@ -79,7 +79,7 @@ def __init__( usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/UIElements/frame_prim.usd", scale=(0.01, 0.01, 0.01), ), - } + }, ) # Green spheres for IK targets (after transform) ik_target_marker_cfg = VisualizationMarkersCfg( @@ -134,11 +134,15 @@ def retarget(self, data: dict) -> torch.Tensor: body_joints_position = np.array([pose[:3] for pose in body_poses.values()]) body_joints_orientation = np.array([pose[3:] for pose in body_poses.values()]) - self._markers.visualize(translations=torch.tensor(joints_position, device=self._sim_device), - orientations=torch.tensor(joints_orientation, device=self._sim_device)) + self._markers.visualize( + translations=torch.tensor(joints_position, device=self._sim_device), + orientations=torch.tensor(joints_orientation, device=self._sim_device), + ) - self._sphere_markers.visualize(translations=torch.tensor(body_joints_position, device=self._sim_device), - orientations=torch.tensor(body_joints_orientation, device=self._sim_device)) + self._sphere_markers.visualize( + translations=torch.tensor(body_joints_position, device=self._sim_device), + orientations=torch.tensor(body_joints_orientation, device=self._sim_device), + ) # Compute retargeted hand joints using pre-computed index mappings retargeted_hand_joints = np.zeros(self._num_joints, dtype=np.float32) @@ 
-155,7 +159,7 @@ def retarget(self, data: dict) -> torch.Tensor: ik_targets_quat = np.array([left_wrist_transformed[3:], right_wrist_transformed[3:]]) self._ik_target_markers.visualize( translations=torch.tensor(ik_targets_pos, device=self._sim_device), - orientations=torch.tensor(ik_targets_quat, device=self._sim_device) + orientations=torch.tensor(ik_targets_quat, device=self._sim_device), ) left_wrist_tensor = torch.tensor(left_wrist_transformed, dtype=torch.float32, device=self._sim_device) diff --git a/source/isaaclab/test/devices/test_osc_receiver.py b/source/isaaclab/test/devices/test_osc_receiver.py new file mode 100644 index 00000000000..89a8026df6c --- /dev/null +++ b/source/isaaclab/test/devices/test_osc_receiver.py @@ -0,0 +1,230 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from __future__ import annotations + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +# Rest everything follows. 
+ +import numpy as np +import pytest + +from isaaclab.devices.openxr.osc_receiver import ( + BODY_JOINT_NAMES, + DOF_PER_JOINT, + NUM_BODY_JOINTS, + BodyOscReceiver, + _normalize, + _quat_from_forward_up, + _rotation_matrix_to_quat, +) + + +class TestUtilityFunctions: + """Tests for utility functions in osc_receiver module.""" + + def test_normalize_nonzero(self): + """Test normalization of a non-zero vector.""" + v = np.array([3.0, 4.0, 0.0]) + result = _normalize(v) + expected = np.array([0.6, 0.8, 0.0]) + np.testing.assert_array_almost_equal(result, expected) + + def test_normalize_zero(self): + """Test normalization of a zero vector returns unchanged.""" + v = np.array([0.0, 0.0, 0.0]) + result = _normalize(v) + np.testing.assert_array_almost_equal(result, v) + + def test_normalize_small(self): + """Test normalization of a very small vector returns unchanged.""" + v = np.array([1e-8, 1e-8, 1e-8]) + result = _normalize(v) + np.testing.assert_array_almost_equal(result, v) + + def test_rotation_matrix_to_quat_identity(self): + """Test identity rotation matrix gives identity quaternion.""" + R = np.eye(3) + quat = _rotation_matrix_to_quat(R) + # Identity quaternion is [0, 0, 0, 1] + np.testing.assert_array_almost_equal(quat, [0.0, 0.0, 0.0, 1.0], decimal=5) + + def test_rotation_matrix_to_quat_90_z(self): + """Test 90 degree rotation around Z axis.""" + R = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) + quat = _rotation_matrix_to_quat(R) + # 90 degree rotation around Z: [0, 0, sin(45), cos(45)] = [0, 0, 0.707, 0.707] + expected = np.array([0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)]) + np.testing.assert_array_almost_equal(np.abs(quat), np.abs(expected), decimal=3) + + def test_quat_from_forward_up_x_forward(self): + """Test quaternion from forward pointing in +X direction.""" + forward = np.array([1.0, 0.0, 0.0]) + quat = _quat_from_forward_up(forward) + # With +X forward and +Z up, this should be close to identity + # Check that the 
quaternion is normalized + np.testing.assert_almost_equal(np.linalg.norm(quat), 1.0) + + def test_quat_from_forward_up_zero_forward(self): + """Test quaternion from zero forward returns identity.""" + forward = np.array([0.0, 0.0, 0.0]) + quat = _quat_from_forward_up(forward) + np.testing.assert_array_almost_equal(quat, [0.0, 0.0, 0.0, 1.0]) + + +class TestBodyOscReceiverConstants: + """Tests for OSC receiver constants.""" + + def test_body_joint_names_count(self): + """Test that NUM_BODY_JOINTS matches BODY_JOINT_NAMES length.""" + assert len(BODY_JOINT_NAMES) == NUM_BODY_JOINTS + + def test_dof_per_joint(self): + """Test DOF per joint is 7 (pos + quat).""" + assert DOF_PER_JOINT == 7 + + def test_expected_joints(self): + """Test expected joints are present.""" + expected = { + "head", + "hip", + "chest", + "left_foot", + "right_foot", + "left_knee", + "right_knee", + "left_elbow", + "right_elbow", + } + assert set(BODY_JOINT_NAMES) == expected + + +class TestBodyOscReceiver: + """Tests for BodyOscReceiver class.""" + + @pytest.fixture + def receiver(self): + """Create a BodyOscReceiver instance for testing. + + Uses a non-standard port to avoid conflicts. 
+ """ + receiver = BodyOscReceiver(ip="127.0.0.1", port=19000) + yield receiver + # Cleanup: stop the server + receiver._server.shutdown() + + def test_initialization(self, receiver): + """Test receiver initializes with correct data shape.""" + data = receiver.get_matrix() + assert data.shape == (NUM_BODY_JOINTS, DOF_PER_JOINT) + + def test_initial_positions_zero(self, receiver): + """Test initial positions are zero.""" + data = receiver.get_matrix() + positions = data[:, :3] + np.testing.assert_array_equal(positions, np.zeros((NUM_BODY_JOINTS, 3))) + + def test_initial_quaternions_identity(self, receiver): + """Test initial quaternions are identity.""" + data = receiver.get_matrix() + quats = data[:, 3:] + expected = np.tile([0.0, 0.0, 0.0, 1.0], (NUM_BODY_JOINTS, 1)) + np.testing.assert_array_equal(quats, expected) + + def test_get_flat_shape(self, receiver): + """Test get_flat returns correct shape.""" + flat = receiver.get_flat() + assert flat.shape == (NUM_BODY_JOINTS * DOF_PER_JOINT,) + + def test_get_position_valid_joint(self, receiver): + """Test get_position for valid joint returns correct shape.""" + pos = receiver.get_position("head") + assert pos.shape == (3,) + + def test_get_position_invalid_joint(self, receiver): + """Test get_position raises for invalid joint.""" + with pytest.raises(ValueError, match="Unknown joint name"): + receiver.get_position("invalid_joint") + + def test_get_pose_valid_joint(self, receiver): + """Test get_pose for valid joint returns correct shape.""" + pose = receiver.get_pose("hip") + assert pose.shape == (7,) + + def test_get_pose_invalid_joint(self, receiver): + """Test get_pose raises for invalid joint.""" + with pytest.raises(ValueError, match="Unknown joint name"): + receiver.get_pose("invalid_joint") + + def test_on_position_updates_data(self, receiver): + """Test _on_position updates internal data correctly.""" + # Simulate receiving position data for head (index 0) + 
receiver._on_position("/tracking/trackers/head/position", 1.0, 2.0, 3.0) + + # Note: coordinate swizzle (x, z, y) -> (x, y, z) + # So input (1.0, 2.0, 3.0) becomes (1.0, 3.0, 2.0) + pos = receiver.get_position("head") + np.testing.assert_array_almost_equal(pos, [1.0, 3.0, 2.0]) + + def test_on_position_by_index(self, receiver): + """Test _on_position works with numeric indices.""" + # Index 1 is "hip" + receiver._on_position("/tracking/trackers/1/position", 5.0, 6.0, 7.0) + + pos = receiver.get_position("hip") + np.testing.assert_array_almost_equal(pos, [5.0, 7.0, 6.0]) + + def test_on_position_invalid_index_ignored(self, receiver): + """Test _on_position ignores invalid indices.""" + initial_data = receiver.get_matrix().copy() + receiver._on_position("/tracking/trackers/999/position", 1.0, 2.0, 3.0) + np.testing.assert_array_equal(receiver.get_matrix(), initial_data) + + def test_on_position_insufficient_args(self, receiver): + """Test _on_position ignores messages with insufficient args.""" + initial_data = receiver.get_matrix().copy() + receiver._on_position("/tracking/trackers/head/position", 1.0, 2.0) # Only 2 args + np.testing.assert_array_equal(receiver.get_matrix(), initial_data) + + def test_recompute_rotations(self, receiver): + """Test recompute_rotations updates quaternions.""" + # Set some positions + receiver._on_position("/tracking/trackers/hip/position", 0.0, 0.0, 0.0) + receiver._on_position("/tracking/trackers/chest/position", 0.0, 1.0, 0.0) + + # Recompute rotations + receiver.recompute_rotations() + + # Quaternion should be updated and normalized + new_quat = receiver.get_pose("hip")[3:] + + # Since chest is above hip (forward direction), quaternion should be computed + # The exact value depends on the heuristic, but it should be normalized + assert new_quat.shape == (4,) + np.testing.assert_almost_equal(np.linalg.norm(new_quat), 1.0) + + def test_thread_safety_get_matrix(self, receiver): + """Test get_matrix returns a copy (thread safe).""" + 
data1 = receiver.get_matrix() + data1[0, 0] = 999.0 + data2 = receiver.get_matrix() + assert data2[0, 0] != 999.0 + + def test_thread_safety_get_flat(self, receiver): + """Test get_flat returns a copy (thread safe).""" + flat1 = receiver.get_flat() + flat1[0] = 999.0 + flat2 = receiver.get_flat() + assert flat2[0] != 999.0 + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) From 73335f1594a72286275325bf833fc562aea075bd Mon Sep 17 00:00:00 2001 From: Bruk Gebregziabher Date: Wed, 21 Jan 2026 13:01:20 +0100 Subject: [PATCH 3/6] refactor: remove duplicate metaquesthandtracking device config --- .../pick_place/pickplace_gr1t2_env_cfg.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py index 0a2cd002238..ba6c5d38513 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py @@ -416,17 +416,5 @@ def __post_init__(self): sim_device=self.sim.device, xr_cfg=self.xr, ), - "metaquesthandtracking": OpenXRDeviceCfg( - retargeters=[ - GR1T2RetargeterCfg( - enable_visualization=True, - num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS, - sim_device=self.sim.device, - hand_joint_names=self.actions.upper_body_ik.hand_joint_names, - ), - ], - sim_device=self.sim.device, - xr_cfg=self.xr, - ), } ) From 48925b2f13262c48ac1e729b1ededa3cd343bcfb Mon Sep 17 00:00:00 2001 From: Bruk Gebregziabher Date: Wed, 21 Jan 2026 15:02:45 +0100 Subject: [PATCH 4/6] fix: prevent wrist pose mutation and remove head tracker skip - Add .copy() to wrist poses before modification to avoid mutating original data - Remove conditional skip for head tracker in body tracker loop - Relocate head tracking comment 
outside loop for clarity --- source/isaaclab/isaaclab/devices/openxr/openxr_device.py | 4 +--- .../openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py | 2 ++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/isaaclab/isaaclab/devices/openxr/openxr_device.py b/source/isaaclab/isaaclab/devices/openxr/openxr_device.py index 817f4c526f5..1c3e5c3bdf3 100644 --- a/source/isaaclab/isaaclab/devices/openxr/openxr_device.py +++ b/source/isaaclab/isaaclab/devices/openxr/openxr_device.py @@ -394,10 +394,8 @@ def _calculate_body_trackers(self) -> dict[str, np.ndarray]: self._osc_receiver = BodyOscReceiver(port=self._body_osc_port) logger.info(f"Initialized body OSC receiver on port {self._body_osc_port}") + # All body trackers, including the head, are read from the OSC receiver here for tracker_name in BODY_TRACKER_NAMES: - if tracker_name == "head": - # Head is tracked via OpenXR, not OSC - continue try: tracker_pose = self._osc_receiver.get_pose(tracker_name) position = tracker_pose[:3] diff --git a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py index caad7225046..41907f1b470 100644 --- a/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py +++ b/source/isaaclab/isaaclab/devices/openxr/retargeters/humanoid/fourier/gr1t2_retargeter.py @@ -119,6 +119,8 @@ def retarget(self, data: dict) -> torch.Tensor: left_palm = left_hand_poses.get("palm") right_palm = right_hand_poses.get("palm") + left_wrist = left_wrist.copy() + right_wrist = right_wrist.copy() left_wrist[3:] = left_palm[3:] right_wrist[3:] = right_palm[3:] From 2f3ad5fefea03bc8c0bfb981b10ebb0221aa68a4 Mon Sep 17 00:00:00 2001 From: Bruk Gebregziabher Date: Wed, 21 Jan 2026 15:09:44 +0100 Subject: [PATCH 5/6] fix: address PR review feedback - Only create separate keyboard listener for non-keyboard teleop devices - Add shutdown method to BodyOscReceiver for proper resource
cleanup --- scripts/environments/teleoperation/teleop_se3_agent.py | 9 ++++++--- .../isaaclab/isaaclab/devices/openxr/osc_receiver.py | 10 ++++++++++ source/isaaclab/test/devices/test_osc_receiver.py | 2 +- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/scripts/environments/teleoperation/teleop_se3_agent.py b/scripts/environments/teleoperation/teleop_se3_agent.py index 6c79258b16a..b121222e93f 100644 --- a/scripts/environments/teleoperation/teleop_se3_agent.py +++ b/scripts/environments/teleoperation/teleop_se3_agent.py @@ -258,9 +258,12 @@ def toggle_teleoperation() -> None: print("Teleoperation started. Press 'R' to reset the environment.") - keyboard = Se2Keyboard(Se2KeyboardCfg()) - keyboard.add_callback("R", reset_recording_instance) - keyboard.add_callback("S", toggle_teleoperation) + # Create a separate keyboard listener only when the teleop device is neither a keyboard nor a gamepad (e.g., handtracking, spacemouse) + # so that the R/S key controls do not conflict with the main teleop interface + if not isinstance(teleop_interface, (Se3Keyboard, Se3Gamepad)): + keyboard = Se2Keyboard(Se2KeyboardCfg()) + keyboard.add_callback("R", reset_recording_instance) + keyboard.add_callback("S", toggle_teleoperation) # simulate environment while simulation_app.is_running(): diff --git a/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py b/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py index 40b13c8196a..abd90b7fbe9 100644 --- a/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py +++ b/source/isaaclab/isaaclab/devices/openxr/osc_receiver.py @@ -146,6 +146,16 @@ def __init__(self, ip: str = "127.0.0.1", port: int = 9000): self._thread = threading.Thread(target=self._server.serve_forever, daemon=True) self._thread.start() + def shutdown(self) -> None: + """Stop the OSC server and clean up resources.""" + if self._server is not None: + self._server.shutdown() + self._server = None + + def __del__(self): + """Clean up resources when the object is destroyed.""" + 
self.shutdown() + def _on_position(self, addr: str, *args) -> None: """Handle incoming OSC position messages. diff --git a/source/isaaclab/test/devices/test_osc_receiver.py b/source/isaaclab/test/devices/test_osc_receiver.py index 89a8026df6c..c9c7f3537b8 100644 --- a/source/isaaclab/test/devices/test_osc_receiver.py +++ b/source/isaaclab/test/devices/test_osc_receiver.py @@ -119,7 +119,7 @@ def receiver(self): receiver = BodyOscReceiver(ip="127.0.0.1", port=19000) yield receiver # Cleanup: stop the server - receiver._server.shutdown() + receiver.shutdown() def test_initialization(self, receiver): """Test receiver initializes with correct data shape.""" From eab75f72af382fcd7fe34f5ed6148de897305fae Mon Sep 17 00:00:00 2001 From: Bruk Gebregziabher Date: Mon, 26 Jan 2026 11:09:33 +0100 Subject: [PATCH 6/6] chore: bump version to 0.54.1 for Meta Quest body tracking feature --- source/isaaclab/config/extension.toml | 2 +- source/isaaclab/docs/CHANGELOG.rst | 22 +++++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/source/isaaclab/config/extension.toml b/source/isaaclab/config/extension.toml index bb7a3965e34..46eae5bd547 100644 --- a/source/isaaclab/config/extension.toml +++ b/source/isaaclab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.54.0" +version = "0.54.1" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 905d5562ac1..16dc74be2a0 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -1,15 +1,12 @@ Changelog --------- -0.54.0 (2026-01-13) +0.54.1 (2026-01-26) ~~~~~~~~~~~~~~~~~~~ Added ^^^^^ -* Added Fabric backend support to :class:`~isaaclab.sim.views.XformPrimView` for GPU-accelerated - batch transform operations on all Boundable prims using Warp kernels. 
-* Added :mod:`~isaaclab.sim.utils.fabric_utils` module with Warp kernels for efficient Fabric matrix operations. * Added :class:`~isaaclab.devices.openxr.BodyOscReceiver` for receiving body tracking data from Meta Quest 3 via OSC protocol. * Added support for Meta Quest 3 body tracking in :class:`~isaaclab.devices.OpenXRDevice` with configurable OSC port via ``body_osc_port`` parameter. * Added ``BODY`` tracking target to :class:`~isaaclab.devices.DeviceBase` for full-body teleoperation. @@ -20,10 +17,25 @@ Added Changed ^^^^^^^ -* Changed :class:`~isaaclab.sensors.camera.Camera` to use Fabric backend for faster pose queries. * Improved :class:`~isaaclab.devices.humanoid.GR1T2Retargeter` with pre-computed joint index mappings for better performance. +0.54.0 (2026-01-13) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added Fabric backend support to :class:`~isaaclab.sim.views.XformPrimView` for GPU-accelerated + batch transform operations on all Boundable prims using Warp kernels. +* Added :mod:`~isaaclab.sim.utils.fabric_utils` module with Warp kernels for efficient Fabric matrix operations. + +Changed +^^^^^^^ + +* Changed :class:`~isaaclab.sensors.camera.Camera` to use Fabric backend for faster pose queries. + + 0.53.2 (2026-01-14) ~~~~~~~~~~~~~~~~~~~