diff --git a/docs/source/migration/migrating_to_isaaclab_3-0.rst b/docs/source/migration/migrating_to_isaaclab_3-0.rst index 45a992bda47..d7f6d42bac9 100644 --- a/docs/source/migration/migrating_to_isaaclab_3-0.rst +++ b/docs/source/migration/migrating_to_isaaclab_3-0.rst @@ -53,6 +53,57 @@ The following asset classes remain in the ``isaaclab`` package and can still be These classes now inherit from new abstract base classes but maintain full backward compatibility. +The following sensor classes also remain in the ``isaaclab`` package with unchanged imports: + +- :class:`~isaaclab.sensors.ContactSensor`, :class:`~isaaclab.sensors.ContactSensorCfg`, :class:`~isaaclab.sensors.ContactSensorData` +- :class:`~isaaclab.sensors.Imu`, :class:`~isaaclab.sensors.ImuCfg`, :class:`~isaaclab.sensors.ImuData` +- :class:`~isaaclab.sensors.FrameTransformer`, :class:`~isaaclab.sensors.FrameTransformerCfg`, :class:`~isaaclab.sensors.FrameTransformerData` + +These sensor classes now use factory patterns that automatically instantiate the appropriate backend +implementation (PhysX by default), maintaining full backward compatibility. + +If you need to import the PhysX sensor implementations directly (e.g., for type hints or subclassing), +you can import from ``isaaclab_physx.sensors``: + +.. code-block:: python + + # Direct PhysX implementation imports + from isaaclab_physx.sensors import ContactSensor, ContactSensorData + from isaaclab_physx.sensors import Imu, ImuData + from isaaclab_physx.sensors import FrameTransformer, FrameTransformerData + + +Sensor Pose Properties Deprecation +---------------------------------- + +The ``pose_w``, ``pos_w``, and ``quat_w`` properties on :class:`~isaaclab.sensors.ContactSensorData` +and :class:`~isaaclab.sensors.ImuData` are deprecated and will be removed in a future release. + +If you need to track sensor poses in world frame, please use a dedicated sensor such as +:class:`~isaaclab.sensors.FrameTransformer` instead. 
+ +**Before (deprecated):** + +.. code-block:: python + + # Using pose properties directly on sensor data + sensor_pos = contact_sensor.data.pos_w + sensor_quat = contact_sensor.data.quat_w + +**After (recommended):** + +.. code-block:: python + + # Use FrameTransformer to track sensor pose + frame_transformer = FrameTransformer(FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/sensor_link"), + ], + )) + sensor_pos = frame_transformer.data.target_pos_w + sensor_quat = frame_transformer.data.target_quat_w + ``root_physx_view`` Deprecation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab/config/extension.toml b/source/isaaclab/config/extension.toml index 02b727fd663..f56b9f1a68a 100644 --- a/source/isaaclab/config/extension.toml +++ b/source/isaaclab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "2.0.0" +version = "2.1.0" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 1d36fa3d7e2..3b59fa50e19 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -1,6 +1,40 @@ Changelog --------- +2.1.0 (2026-02-02) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab.sensors.contact_sensor.BaseContactSensor` and + :class:`~isaaclab.sensors.contact_sensor.BaseContactSensorData` abstract base classes that define + the interface for contact sensors. These classes provide a backend-agnostic API for contact sensing. +* Added :class:`~isaaclab.sensors.imu.BaseImu` and :class:`~isaaclab.sensors.imu.BaseImuData` abstract + base classes that define the interface for IMU sensors. These classes provide a backend-agnostic + API for inertial measurement. 
+* Added :class:`~isaaclab.sensors.frame_transformer.BaseFrameTransformer` and + :class:`~isaaclab.sensors.frame_transformer.BaseFrameTransformerData` abstract base classes that + define the interface for frame transformer sensors. These classes provide a backend-agnostic API + for coordinate frame transformations. + +Changed +^^^^^^^ + +* Refactored the sensor classes (:class:`~isaaclab.sensors.ContactSensor`, + :class:`~isaaclab.sensors.Imu`, :class:`~isaaclab.sensors.FrameTransformer`) to follow the + multi-backend architecture. The classes now act as factory wrappers that instantiate the + appropriate backend-specific implementation (PhysX by default). +* Refactored the sensor data classes (:class:`~isaaclab.sensors.ContactSensorData`, + :class:`~isaaclab.sensors.ImuData`, :class:`~isaaclab.sensors.FrameTransformerData`) to use the + factory pattern for backend-specific instantiation. +* Moved PhysX-specific sensor tests to the ``isaaclab_physx`` package: + + * ``test_contact_sensor.py`` → ``isaaclab_physx/test/sensors/`` + * ``test_imu.py`` → ``isaaclab_physx/test/sensors/`` + * ``test_frame_transformer.py`` → ``isaaclab_physx/test/sensors/`` + + 2.0.0 (2026-01-30) ~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index a614705ed3d..6d75d2d32cb 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -166,7 +166,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the asset. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ raise NotImplementedError() @@ -203,7 +203,7 @@ def write_data_to_sim(self) -> None: If any explicit actuators are present, then the actuator models are used to compute the joint commands. 
Otherwise, the joint commands are directly set into the simulation. - Note: + .. note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ @@ -1107,7 +1107,7 @@ def _apply_actuator_model(self) -> None: def _validate_cfg(self) -> None: """Validate the configuration after processing. - Note: + .. note:: This function should be called only after the configuration has been processed and the buffers have been created. Otherwise, some settings that are altered during processing may not be validated. For instance, the actuator models may change the joint max velocity limits. @@ -1118,7 +1118,8 @@ def _validate_cfg(self) -> None: def _log_articulation_info(self) -> None: """Log information about the articulation. - Note: We purposefully read the values from the simulator to ensure that the values are configured as expected. + .. note:: + We purposefully read the values from the simulator to ensure that the values are configured as expected. """ raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py index 323692df243..5e306e1180f 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -590,7 +590,7 @@ def projected_gravity_b(self) -> torch.Tensor: def heading_w(self) -> torch.Tensor: """Yaw heading of the base frame (in radians). Shape is (num_instances,). - Note: + .. note:: This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. 
""" diff --git a/source/isaaclab/isaaclab/assets/asset_base.py b/source/isaaclab/isaaclab/assets/asset_base.py index 158b6993351..8a7fecedbb8 100644 --- a/source/isaaclab/isaaclab/assets/asset_base.py +++ b/source/isaaclab/isaaclab/assets/asset_base.py @@ -155,7 +155,7 @@ def set_visibility(self, visible: bool, env_ids: Sequence[int] | None = None): It is useful for toggling the visibility of the asset in the simulator. For instance, one can hide the asset when it is not being used to reduce the rendering overhead. - Note: + .. note:: This operation uses the PXR API to set the visibility of the prims. Thus, the operation may have an overhead if the number of prims is large. @@ -304,7 +304,7 @@ def safe_callback(callback_name, event, obj_ref): def _initialize_callback(self, event): """Initializes the scene elements. - Note: + .. note:: PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be called whenever the simulator "plays" from a "stop" state. """ @@ -334,7 +334,7 @@ def _on_prim_deletion(self, prim_path: str) -> None: Args: prim_path: The path to the prim that is being deleted. - Note: + .. note:: This function is called when the prim is deleted. """ if prim_path == "/": diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py index 7308bceea92..34ca3c4ad7e 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py @@ -89,7 +89,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the asset. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ raise NotImplementedError() @@ -123,7 +123,7 @@ def reset(self, env_ids: Sequence[int] | None = None) -> None: def write_data_to_sim(self) -> None: """Write external wrench to the simulation. - Note: + .. 
note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index 4d8a16151a6..849e648de57 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -288,7 +288,7 @@ def projected_gravity_b(self) -> torch.Tensor: def heading_w(self) -> torch.Tensor: """Yaw heading of the base frame (in radians). Shape is (num_instances,). - Note: + .. note:: This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. """ diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py index 42f504070e8..e7814154fbd 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py @@ -87,7 +87,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the rigid object collection. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ raise NotImplementedError() @@ -122,7 +122,7 @@ def reset(self, env_ids: Sequence[int] | None = None, object_ids: slice | torch. def write_data_to_sim(self) -> None: """Write external wrench to the simulation. - Note: + .. note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. 
""" diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py index 8f1ed6f6f4a..5ecad670dbb 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -217,7 +217,7 @@ def projected_gravity_b(self) -> torch.Tensor: def heading_w(self) -> torch.Tensor: """Yaw heading of the base frame (in radians). Shape is (num_instances, num_bodies). - Note: + .. note:: This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. """ diff --git a/source/isaaclab/isaaclab/sensors/camera/camera.py b/source/isaaclab/isaaclab/sensors/camera/camera.py index e651567fb8f..492806a4ca9 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera.py @@ -577,7 +577,7 @@ def _update_intrinsic_matrices(self, env_ids: Sequence[int]): Also called calibration matrix. This matrix works for linear depth images. We assume square pixels. - Note: + .. note:: The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera. The coordinates of points on the image plane are in the homogeneous representation. """ diff --git a/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py b/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py index 66dcaf7809b..ffe2bce7350 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py @@ -42,7 +42,7 @@ class OffsetCfg: offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity. - Note: + .. note:: The parent frame is the frame the sensor attaches to. 
For example, the parent frame of a camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``. """ diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py index 94b402d41a3..5f2d3df68a0 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py @@ -5,6 +5,10 @@ """Sub-module for rigid contact sensor.""" +from .base_contact_sensor import BaseContactSensor +from .base_contact_sensor_data import BaseContactSensorData from .contact_sensor import ContactSensor from .contact_sensor_cfg import ContactSensorCfg from .contact_sensor_data import ContactSensorData + +__all__ = ["BaseContactSensor", "BaseContactSensorData", "ContactSensor", "ContactSensorCfg", "ContactSensorData"] diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py new file mode 100644 index 00000000000..673eeb3b208 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py @@ -0,0 +1,203 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +import isaaclab.utils.string as string_utils + +from ..sensor_base import SensorBase +from .base_contact_sensor_data import BaseContactSensorData + +if TYPE_CHECKING: + from .contact_sensor_cfg import ContactSensorCfg + + +class BaseContactSensor(SensorBase): + """A contact reporting sensor. + + The contact sensor reports the normal contact forces on a rigid body in the world frame. 
+ It relies on the physics engine's contact reporting API to be activated on the rigid bodies. + + To enable the contact reporter on a rigid body, please make sure to enable the + :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your + asset spawner configuration. This will enable the contact reporter on all the rigid bodies + in the asset. + + The sensor can be configured to report the contact forces on a set of bodies with a given + filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful + when you want to report the contact forces between the sensor bodies and a specific set of + bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. + + The reporting of the filtered contact forces is only possible as one-to-many. This means that only one + sensor body in an environment can be filtered against multiple bodies in that environment. If you need to + filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor + body. + + As an example, suppose you want to report the contact forces for all the feet of a robot against an object + exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and + :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` + respectively will not work. Instead, you need to create a separate sensor for each foot and filter + it against the object. + """ + + cfg: ContactSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the contact sensor.""" + + def __init__(self, cfg: ContactSensorCfg): + """Initializes the contact sensor object. + + Args: + cfg: The configuration parameters. 
+ """ + # initialize base class + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseContactSensorData: + raise NotImplementedError + + @property + @abstractmethod + def num_bodies(self) -> int: + """Number of bodies with contact sensors attached.""" + raise NotImplementedError + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Ordered names of bodies with contact sensors attached.""" + raise NotImplementedError + + @property + @abstractmethod + def contact_view(self): + """View for the contact reporting. + + .. note:: + None if there is no view associated with the sensor. + """ + raise NotImplementedError + + """ + Operations + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: + """Checks if bodies that have established contact within the last :attr:`dt` seconds. + + This function checks if the bodies have established contact within the last :attr:`dt` seconds + by comparing the current contact time with the given time period. If the contact time is less + than the given time period, then the bodies are considered to be in contact. + + .. note:: + The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other + words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. 
This is always true + if the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + Args: + dt: The time period since the contact was established. + abs_tol: The absolute tolerance for the comparison. + + Returns: + A boolean tensor indicating the bodies that have established contact within the last + :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the + number of bodies in each sensor. + + Raises: + RuntimeError: If the sensor is not configured to track contact time. + """ + # check if the sensor is configured to track contact time + if not self.cfg.track_air_time: + raise RuntimeError( + "The contact sensor is not configured to track contact time." + "Please enable the 'track_air_time' in the sensor configuration." + ) + # check if the bodies are in contact + currently_in_contact = self.data.current_contact_time > 0.0 + less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol) + return currently_in_contact * less_than_dt_in_contact + + def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: + """Checks if bodies that have broken contact within the last :attr:`dt` seconds. + + This function checks if the bodies have broken contact within the last :attr:`dt` seconds + by comparing the current air time with the given time period. If the air time is less + than the given time period, then the bodies are considered to not be in contact. + + .. note:: + It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, + :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if + the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + Args: + dt: The time period since the contact is broken. + abs_tol: The absolute tolerance for the comparison. 
+ + Returns: + A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds. + Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. + + Raises: + RuntimeError: If the sensor is not configured to track contact time. + """ + # check if the sensor is configured to track contact time + if not self.cfg.track_air_time: + raise RuntimeError( + "The contact sensor is not configured to track contact time." + "Please enable the 'track_air_time' in the sensor configuration." + ) + # check if the bodies are currently detached (in air) + currently_detached = self.data.current_air_time > 0.0 + less_than_dt_detached = self.data.current_air_time < (dt + abs_tol) + return currently_detached * less_than_dt_detached + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + @abstractmethod + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + Subclasses should call ``super()._initialize_impl()`` first to initialize + the common sensor infrastructure from :class:`SensorBase`. + """ + super()._initialize_impl() + + @abstractmethod + def _update_buffers_impl(self, env_ids: Sequence[int]): + raise NotImplementedError + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + super()._invalidate_initialize_callback(event) diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py new file mode 100644 index 00000000000..0b53186d3d2 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py @@ -0,0 +1,130 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for contact sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +import torch + + +class BaseContactSensorData(ABC): + """Data container for the contact reporting sensor. + + This base class defines the interface for contact sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + """ + + @property + @abstractmethod + def pose_w(self) -> torch.Tensor | None: + """Pose of the sensor origin in world frame. Shape is (N, 7). Quaternion in xyzw order.""" + raise NotImplementedError + + @property + @abstractmethod + def pos_w(self) -> torch.Tensor | None: + """Position of the sensor origin in world frame. Shape is (N, 3). + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def quat_w(self) -> torch.Tensor | None: + """Orientation of the sensor origin in quaternion (x, y, z, w) in world frame. + + Shape is (N, 4). None if :attr:`ContactSensorCfg.track_pose` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def net_forces_w(self) -> torch.Tensor | None: + """The net normal contact forces in world frame. Shape is (N, B, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def net_forces_w_history(self) -> torch.Tensor | None: + """History of net normal contact forces. Shape is (N, T, B, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def force_matrix_w(self) -> torch.Tensor | None: + """Normal contact forces filtered between sensor and filtered bodies. + + Shape is (N, B, M, 3). None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + raise NotImplementedError + + @property + @abstractmethod + def force_matrix_w_history(self) -> torch.Tensor | None: + """History of filtered contact forces. Shape is (N, T, B, M, 3). 
+ + None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + raise NotImplementedError + + # Make issues for this in Newton P1/P2s + @property + @abstractmethod + def contact_pos_w(self) -> torch.Tensor | None: + """Average position of contact points. Shape is (N, B, M, 3). + + None if :attr:`ContactSensorCfg.track_contact_points` is False. + """ + raise NotImplementedError + + # Make issues for this in Newton P1/P2s + @property + @abstractmethod + def friction_forces_w(self) -> torch.Tensor | None: + """Sum of friction forces. Shape is (N, B, M, 3). + + None if :attr:`ContactSensorCfg.track_friction_forces` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def last_air_time(self) -> torch.Tensor | None: + """Time spent in air before last contact. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def current_air_time(self) -> torch.Tensor | None: + """Time spent in air since last detach. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def last_contact_time(self) -> torch.Tensor | None: + """Time spent in contact before last detach. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def current_contact_time(self) -> torch.Tensor | None: + """Time spent in contact since last contact. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. 
+ """ + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py index d7657841776..9ddaf15fa9d 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py @@ -3,535 +3,25 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Ignore optional memory usage warning globally -# pyright: reportOptionalSubscript=false - from __future__ import annotations -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch - -import carb -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import PhysxSchema - -import isaaclab.sim as sim_utils -import isaaclab.utils.string as string_utils -from isaaclab.markers import VisualizationMarkers +from isaaclab.utils.backend_utils import FactoryBase -from ..sensor_base import SensorBase -from .contact_sensor_data import ContactSensorData +from .base_contact_sensor import BaseContactSensor +from .base_contact_sensor_data import BaseContactSensorData if TYPE_CHECKING: - from .contact_sensor_cfg import ContactSensorCfg - - -class ContactSensor(SensorBase): - """A contact reporting sensor. - - The contact sensor reports the normal contact forces on a rigid body in the world frame. - It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. - - To enable the contact reporter on a rigid body, please make sure to enable the - :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your - asset spawner configuration. This will enable the contact reporter on all the rigid bodies - in the asset. - - The sensor can be configured to report the contact forces on a set of bodies with a given - filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. 
This is useful - when you want to report the contact forces between the sensor bodies and a specific set of - bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. - Please check the documentation on `RigidContact`_ for more details. - - The reporting of the filtered contact forces is only possible as one-to-many. This means that only one - sensor body in an environment can be filtered against multiple bodies in that environment. If you need to - filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor - body. - - As an example, suppose you want to report the contact forces for all the feet of a robot against an object - exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and - :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` - respectively will not work. Instead, you need to create a separate sensor for each foot and filter - it against the object. - - .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html - .. _RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.api/docs/index.html#isaacsim.core.api.sensors.RigidContactView - """ - - cfg: ContactSensorCfg - """The configuration parameters.""" - - def __init__(self, cfg: ContactSensorCfg): - """Initializes the contact sensor object. - - Args: - cfg: The configuration parameters. 
- """ - # initialize base class - super().__init__(cfg) - - # Enable contact processing - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/disableContactProcessing", False) - - # Create empty variables for storing output data - self._data: ContactSensorData = ContactSensorData() - # initialize self._body_physx_view for running in extension mode - self._body_physx_view = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Contact sensor @ '{self.cfg.prim_path}': \n" - f"\tview type : {self.body_physx_view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of bodies : {self.num_bodies}\n" - f"\tbody names : {self.body_names}\n" - ) - - """ - Properties - """ - - @property - def num_instances(self) -> int: - return self.body_physx_view.count - - @property - def data(self) -> ContactSensorData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_bodies(self) -> int: - """Number of bodies with contact sensors attached.""" - return self._num_bodies - - @property - def body_names(self) -> list[str]: - """Ordered names of bodies with contact sensors attached.""" - prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] - return [path.split("/")[-1] for path in prim_paths] - - @property - def body_physx_view(self) -> physx.RigidBodyView: - """View for the rigid bodies captured (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._body_physx_view - - @property - def contact_physx_view(self) -> physx.RigidContactView: - """Contact reporter view for the bodies (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. 
- """ - return self._contact_physx_view - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset accumulative data buffers - self._data.net_forces_w[env_ids] = 0.0 - self._data.net_forces_w_history[env_ids] = 0.0 - # reset force matrix - if len(self.cfg.filter_prim_paths_expr) != 0: - self._data.force_matrix_w[env_ids] = 0.0 - self._data.force_matrix_w_history[env_ids] = 0.0 - # reset the current air time - if self.cfg.track_air_time: - self._data.current_air_time[env_ids] = 0.0 - self._data.last_air_time[env_ids] = 0.0 - self._data.current_contact_time[env_ids] = 0.0 - self._data.last_contact_time[env_ids] = 0.0 - # reset contact positions - if self.cfg.track_contact_points: - self._data.contact_pos_w[env_ids, :] = torch.nan - # reset friction forces - if self.cfg.track_friction_forces: - self._data.friction_forces_w[env_ids, :] = 0.0 - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) - - def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: - """Checks if bodies that have established contact within the last :attr:`dt` seconds. - - This function checks if the bodies have established contact within the last :attr:`dt` seconds - by comparing the current contact time with the given time period. 
If the contact time is less - than the given time period, then the bodies are considered to be in contact. - - Note: - The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other - words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true - if the sensor is updated by the physics or the environment stepping time-step and the sensor - is read by the environment stepping time-step. - - Args: - dt: The time period since the contact was established. - abs_tol: The absolute tolerance for the comparison. - - Returns: - A boolean tensor indicating the bodies that have established contact within the last - :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the - number of bodies in each sensor. - - Raises: - RuntimeError: If the sensor is not configured to track contact time. - """ - # check if the sensor is configured to track contact time - if not self.cfg.track_air_time: - raise RuntimeError( - "The contact sensor is not configured to track contact time." - "Please enable the 'track_air_time' in the sensor configuration." - ) - # check if the bodies are in contact - currently_in_contact = self.data.current_contact_time > 0.0 - less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol) - return currently_in_contact * less_than_dt_in_contact - - def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: - """Checks if bodies that have broken contact within the last :attr:`dt` seconds. - - This function checks if the bodies have broken contact within the last :attr:`dt` seconds - by comparing the current air time with the given time period. If the air time is less - than the given time period, then the bodies are considered to not be in contact. - - Note: - It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, - :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. 
This is always true if - the sensor is updated by the physics or the environment stepping time-step and the sensor - is read by the environment stepping time-step. - - Args: - dt: The time period since the contract is broken. - abs_tol: The absolute tolerance for the comparison. - - Returns: - A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds. - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Raises: - RuntimeError: If the sensor is not configured to track contact time. - """ - # check if the sensor is configured to track contact time - if not self.cfg.track_air_time: - raise RuntimeError( - "The contact sensor is not configured to track contact time." - "Please enable the 'track_air_time' in the sensor configuration." - ) - # check if the sensor is configured to track contact time - currently_detached = self.data.current_air_time > 0.0 - less_than_dt_detached = self.data.current_air_time < (dt + abs_tol) - return currently_detached * less_than_dt_detached - - """ - Implementation. - """ - - def _initialize_impl(self): - super()._initialize_impl() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # check that only rigid bodies are selected - leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1] - template_prim_path = self._parent_prims[0].GetPath().pathString - body_names = list() - for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern): - # check if prim has contact reporter API - if prim.HasAPI(PhysxSchema.PhysxContactReportAPI): - prim_path = prim.GetPath().pathString - body_names.append(prim_path.rsplit("/", 1)[-1]) - # check that there is at least one body with contact reporter API - if not body_names: - raise RuntimeError( - f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API." 
- "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration." - ) - - # construct regex expression for the body names - body_names_regex = r"(" + "|".join(body_names) + r")" - body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" - # convert regex expressions to glob expressions for PhysX - body_names_glob = body_names_regex.replace(".*", "*") - filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr] - - # create a rigid prim view for the sensor - self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) - self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view( - body_names_glob, - filter_patterns=filter_prim_paths_glob, - max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, - ) - # resolve the true count of bodies - self._num_bodies = self.body_physx_view.count // self._num_envs - # check that contact reporter succeeded - if self._num_bodies != len(body_names): - raise RuntimeError( - "Failed to initialize contact reporter for specified bodies." 
- f"\n\tInput prim path : {self.cfg.prim_path}" - f"\n\tResolved prim paths: {body_names_regex}" - ) - - # prepare data buffers - self._data.net_forces_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) - # optional buffers - # -- history of net forces - if self.cfg.history_length > 0: - self._data.net_forces_w_history = torch.zeros( - self._num_envs, self.cfg.history_length, self._num_bodies, 3, device=self._device - ) - else: - self._data.net_forces_w_history = self._data.net_forces_w.unsqueeze(1) - # -- pose of sensor origins - if self.cfg.track_pose: - self._data.pos_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) - self._data.quat_w = torch.zeros(self._num_envs, self._num_bodies, 4, device=self._device) - - # check if filter paths are valid - if self.cfg.track_contact_points or self.cfg.track_friction_forces: - if len(self.cfg.filter_prim_paths_expr) == 0: - raise ValueError( - "The 'filter_prim_paths_expr' is empty. Please specify a valid filter pattern to track" - f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." - ) - if self.cfg.max_contact_data_count_per_prim < 1: - raise ValueError( - f"The 'max_contact_data_count_per_prim' is {self.cfg.max_contact_data_count_per_prim}. " - "Please set it to a value greater than 0 to track" - f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." 
- ) - - # -- position of contact points - if self.cfg.track_contact_points: - self._data.contact_pos_w = torch.full( - (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), - torch.nan, - device=self._device, - ) - # -- friction forces at contact points - if self.cfg.track_friction_forces: - self._data.friction_forces_w = torch.full( - (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), - 0.0, - device=self._device, - ) - # -- air/contact time between contacts - if self.cfg.track_air_time: - self._data.last_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.current_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.last_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.current_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - # force matrix: (num_envs, num_bodies, num_filter_shapes, 3) - # force matrix history: (num_envs, history_length, num_bodies, num_filter_shapes, 3) - if len(self.cfg.filter_prim_paths_expr) != 0: - num_filters = self.contact_physx_view.filter_count - self._data.force_matrix_w = torch.zeros( - self._num_envs, self._num_bodies, num_filters, 3, device=self._device - ) - if self.cfg.history_length > 0: - self._data.force_matrix_w_history = torch.zeros( - self._num_envs, self.cfg.history_length, self._num_bodies, num_filters, 3, device=self._device - ) - else: - self._data.force_matrix_w_history = self._data.force_matrix_w.unsqueeze(1) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = slice(None) - - # obtain the contact forces - # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). - # This isn't the most efficient way to do this, but it's the easiest to implement. 
- net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt) - self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] - # update contact force history - if self.cfg.history_length > 0: - self._data.net_forces_w_history[env_ids] = self._data.net_forces_w_history[env_ids].roll(1, dims=1) - self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids] - - # obtain the contact force matrix - if len(self.cfg.filter_prim_paths_expr) != 0: - # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) - num_filters = self.contact_physx_view.filter_count - # acquire and shape the force matrix - force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt) - force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) - self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] - if self.cfg.history_length > 0: - self._data.force_matrix_w_history[env_ids] = self._data.force_matrix_w_history[env_ids].roll(1, dims=1) - self._data.force_matrix_w_history[env_ids, 0] = self._data.force_matrix_w[env_ids] - - # obtain the pose of the sensor origin - if self.cfg.track_pose: - pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids] - self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) - - # obtain contact points - if self.cfg.track_contact_points: - _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = ( - self.contact_physx_view.get_contact_data(dt=self._sim_physics_dt) - ) - self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( - buffer_contact_points, buffer_count, buffer_start_indices - )[env_ids] - - # obtain friction forces - if self.cfg.track_friction_forces: - friction_forces, _, buffer_count, buffer_start_indices = self.contact_physx_view.get_friction_data( - dt=self._sim_physics_dt - ) - self._data.friction_forces_w[env_ids] = 
self._unpack_contact_buffer_data( - friction_forces, buffer_count, buffer_start_indices, avg=False, default=0.0 - )[env_ids] - - # obtain the air time - if self.cfg.track_air_time: - # -- time elapsed since last update - # since this function is called every frame, we can use the difference to get the elapsed time - elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids] - # -- check contact state of bodies - is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold - is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact - is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact - # -- update the last contact time if body has just become in contact - self._data.last_air_time[env_ids] = torch.where( - is_first_contact, - self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), - self._data.last_air_time[env_ids], - ) - # -- increment time for bodies that are not in contact - self._data.current_air_time[env_ids] = torch.where( - ~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 - ) - # -- update the last contact time if body has just detached - self._data.last_contact_time[env_ids] = torch.where( - is_first_detached, - self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), - self._data.last_contact_time[env_ids], - ) - # -- increment time for bodies that are in contact - self._data.current_contact_time[env_ids] = torch.where( - is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 - ) - - def _unpack_contact_buffer_data( - self, - contact_data: torch.Tensor, - buffer_count: torch.Tensor, - buffer_start_indices: torch.Tensor, - avg: bool = True, - default: float = float("nan"), - ) -> torch.Tensor: - """ - Unpacks and aggregates contact data for each (env, body, filter) group. 
- - This function vectorizes the following nested loop: - - for i in range(self._num_bodies * self._num_envs): - for j in range(self.contact_physx_view.filter_count): - start_index_ij = buffer_start_indices[i, j] - count_ij = buffer_count[i, j] - self._contact_position_aggregate_buffer[i, j, :] = torch.mean( - contact_data[start_index_ij : (start_index_ij + count_ij), :], dim=0 - ) - - For more details, see the `RigidContactView.get_contact_data() documentation `_. - - Args: - contact_data: Flat tensor of contact data, shape (N_envs * N_bodies, 3). - buffer_count: Number of contact points per (env, body, filter), shape (N_envs * N_bodies, N_filters). - buffer_start_indices: Start indices for each (env, body, filter), shape (N_envs * N_bodies, N_filters). - avg: If True, average the contact data for each group; if False, sum the data. Defaults to True. - default: Default value to use for groups with zero contacts. Defaults to NaN. - - Returns: - Aggregated contact data, shape (N_envs, N_bodies, N_filters, 3). 
- """ - counts, starts = buffer_count.view(-1), buffer_start_indices.view(-1) - n_rows, total = counts.numel(), int(counts.sum()) - agg = torch.full((n_rows, 3), default, device=self._device, dtype=contact_data.dtype) - if total > 0: - row_ids = torch.repeat_interleave(torch.arange(n_rows, device=self._device), counts) - - block_starts = counts.cumsum(0) - counts - deltas = torch.arange(row_ids.numel(), device=counts.device) - block_starts.repeat_interleave(counts) - flat_idx = starts[row_ids] + deltas - - pts = contact_data.index_select(0, flat_idx) - agg = agg.zero_().index_add_(0, row_ids, pts) - agg = agg / counts.clamp_min(1).unsqueeze(-1) if avg else agg - agg[counts == 0] = default - - return agg.view(self._num_envs * self.num_bodies, -1, 3).view( - self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3 - ) + from isaaclab_physx.sensors.contact_sensor import ContactSensor as PhysXContactSensor + from isaaclab_physx.sensors.contact_sensor import ContactSensorData as PhysXContactSensorData - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. 
not their visibility - if debug_vis: - # create markers if necessary for the first time - if not hasattr(self, "contact_visualizer"): - self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.contact_visualizer.set_visibility(True) - else: - if hasattr(self, "contact_visualizer"): - self.contact_visualizer.set_visibility(False) - def _debug_vis_callback(self, event): - # safely return if view becomes invalid - # note: this invalidity happens because of isaac sim view callbacks - if self.body_physx_view is None: - return - # marker indices - # 0: contact, 1: no contact - net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1) - marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1) - # check if prim is visualized - if self.cfg.track_pose: - frame_origins: torch.Tensor = self._data.pos_w - else: - pose = self.body_physx_view.get_transforms() - frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3] - # visualize - self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1)) +class ContactSensor(FactoryBase, BaseContactSensor): + """Factory for creating contact sensor instances.""" - """ - Internal simulation callbacks. 
- """ + data: BaseContactSensorData | PhysXContactSensorData - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._body_physx_view = None - self._contact_physx_view = None + def __new__(cls, *args, **kwargs) -> BaseContactSensor | PhysXContactSensor: + """Create a new instance of a contact sensor based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py index 6acaee1a9f0..11fbcd8353f 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py @@ -3,151 +3,23 @@ # # SPDX-License-Identifier: BSD-3-Clause -# needed to import for allowing type-hinting: torch.Tensor | None -from __future__ import annotations - -from dataclasses import dataclass - -import torch - - -@dataclass -class ContactSensorData: - """Data container for the contact reporting sensor.""" - - pos_w: torch.Tensor | None = None - """Position of the sensor origin in world frame. - - Shape is (N, 3), where N is the number of sensors. - - Note: - If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. - - """ - - contact_pos_w: torch.Tensor | None = None - """Average of the positions of contact points between sensor body and filter prim in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Collision pairs not in contact will result in NaN. - - Note: - - * If the :attr:`ContactSensorCfg.track_contact_points` is False, then this quantity is None. 
- * If the :attr:`ContactSensorCfg.track_contact_points` is True, a ValueError will be raised if: - - * If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. - * If the :attr:`ContactSensorCfg.max_contact_data_per_prim` is not specified or less than 1. - will not be calculated. - - """ - - friction_forces_w: torch.Tensor | None = None - """Sum of the friction forces between sensor body and filter prim in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Collision pairs not in contact will result in NaN. - - Note: - - * If the :attr:`ContactSensorCfg.track_friction_forces` is False, then this quantity is None. - * If the :attr:`ContactSensorCfg.track_friction_forces` is True, a ValueError will be raised if: - - * The :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. - * The :attr:`ContactSensorCfg.max_contact_data_per_prim` is not specified or less than 1. - - """ +"""Re-exports the base contact sensor data class for backwards compatibility.""" - quat_w: torch.Tensor | None = None - """Orientation of the sensor origin in quaternion (x, y, z, w) in world frame. - - Shape is (N, 4), where N is the number of sensors. - - Note: - If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. - """ - - net_forces_w: torch.Tensor | None = None - """The net normal contact forces in world frame. - - Shape is (N, B, 3), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused - with the total contact forces acting on the sensor bodies (which also includes the tangential forces). - """ - - net_forces_w_history: torch.Tensor | None = None - """The net normal contact forces in world frame. 
- - Shape is (N, T, B, 3), where N is the number of sensors, T is the configured history length - and B is the number of bodies in each sensor. - - In the history dimension, the first index is the most recent and the last index is the oldest. - - Note: - This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused - with the total contact forces acting on the sensor bodies (which also includes the tangential forces). - """ - - force_matrix_w: torch.Tensor | None = None - """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Note: - If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. - """ - - force_matrix_w_history: torch.Tensor | None = None - """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame. - - Shape is (N, T, B, M, 3), where N is the number of sensors, T is the configured history length, - B is number of bodies in each sensor and M is the number of filtered bodies. - - In the history dimension, the first index is the most recent and the last index is the oldest. - - Note: - If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. - """ - - last_air_time: torch.Tensor | None = None - """Time spent (in s) in the air before the last contact. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ - - current_air_time: torch.Tensor | None = None - """Time spent (in s) in the air since the last detach. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. 
+from __future__ import annotations - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ +from typing import TYPE_CHECKING - last_contact_time: torch.Tensor | None = None - """Time spent (in s) in contact before the last detach. +from isaaclab.utils.backend_utils import FactoryBase - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. +from .base_contact_sensor_data import BaseContactSensorData - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ +if TYPE_CHECKING: + from isaaclab_physx.sensors.contact_sensor import ContactSensorData as PhysXContactSensorData - current_contact_time: torch.Tensor | None = None - """Time spent (in s) in contact since the last contact. - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. +class ContactSensorData(FactoryBase, BaseContactSensorData): + """Factory for creating contact sensor data instances.""" - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. 
- """ + def __new__(cls, *args, **kwargs) -> BaseContactSensorData | PhysXContactSensorData: + """Create a new instance of a contact sensor data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py index d5db853e8cc..664a91e4eb0 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py @@ -5,6 +5,10 @@ """Sub-module for frame transformer sensor.""" +from .base_frame_transformer import BaseFrameTransformer +from .base_frame_transformer_data import BaseFrameTransformerData from .frame_transformer import FrameTransformer from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg from .frame_transformer_data import FrameTransformerData + +__all__ = ["BaseFrameTransformer", "BaseFrameTransformerData", "FrameTransformer", "FrameTransformerCfg", "FrameTransformerData", "OffsetCfg"] diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py new file mode 100644 index 00000000000..78b6b545635 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py @@ -0,0 +1,124 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import isaaclab.utils.string as string_utils + +from ..sensor_base import SensorBase +from .base_frame_transformer_data import BaseFrameTransformerData + +if TYPE_CHECKING: + from .frame_transformer_cfg import FrameTransformerCfg + + +class BaseFrameTransformer(SensorBase): + """A sensor for reporting frame transforms. + + This class provides an interface for reporting the transform of one or more frames (target frames) + with respect to another frame (source frame). The source frame is specified by the user as a prim path + (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of + prim paths (:attr:`FrameTransformerCfg.target_frames`). + + The source frame and target frames are assumed to be rigid bodies. The transform of the target frames + with respect to the source frame is computed by first extracting the transform of the source frame + and target frames from the physics engine and then computing the relative transform between the two. + + Additionally, the user can specify an offset for the source frame and each target frame. This is useful + for specifying the transform of the desired frame with respect to the body's center of mass, for instance. + + A common example of using this sensor is to track the position and orientation of the end effector of a + robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the + manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is + typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the + manipulator. 
+ + """ + + cfg: FrameTransformerCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the frame transformer sensor.""" + + def __init__(self, cfg: FrameTransformerCfg): + """Initializes the frame transformer object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseFrameTransformerData: + raise NotImplementedError + + @property + @abstractmethod + def num_bodies(self) -> int: + """Returns the number of target bodies being tracked. + + .. note:: + This is an alias used for consistency with other sensors. Otherwise, we recommend using + :attr:`len(data.target_frame_names)` to access the number of target frames. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Returns the names of the target bodies being tracked. + + .. note:: + This is an alias used for consistency with other sensors. Otherwise, we recommend using + :attr:`data.target_frame_names` to access the target frame names. + """ + raise NotImplementedError + + """ + Operations + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self._target_frame_names, preserve_order) + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. 
+
+        Subclasses should call ``super()._initialize_impl()`` first to initialize
+        the common sensor infrastructure from :class:`SensorBase`.
+        """
+        super()._initialize_impl()
+
+    @abstractmethod
+    def _update_buffers_impl(self, env_ids: Sequence[int]):
+        raise NotImplementedError
+
+    def _invalidate_initialize_callback(self, event):
+        """Invalidates the scene elements."""
+        super()._invalidate_initialize_callback(event)
diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py
new file mode 100644
index 00000000000..87c30d8f27b
--- /dev/null
+++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Base class for frame transformer sensor data containers."""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+
+import torch
+
+
+class BaseFrameTransformerData(ABC):
+    """Data container for the frame transformer sensor.
+
+    This base class defines the interface for frame transformer sensor data. Backend-specific
+    implementations should inherit from this class and provide the actual data storage.
+    """
+
+    @property
+    @abstractmethod
+    def target_frame_names(self) -> list[str]:
+        """Target frame names (order matches data ordering).
+
+        Resolved from :attr:`FrameTransformerCfg.FrameCfg.name`.
+        """
+        raise NotImplementedError
+
+    @property
+    @abstractmethod
+    def target_pose_source(self) -> torch.Tensor:
+        """Pose of the target frame(s) relative to source frame. Shape is (N, M, 7). Quaternion in xyzw order."""
+        raise NotImplementedError
+
+    @property
+    @abstractmethod
+    def target_pos_source(self) -> torch.Tensor:
+        """Position of the target frame(s) relative to source frame.
Shape is (N, M, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def target_quat_source(self) -> torch.Tensor: + """Orientation of the target frame(s) relative to source frame (x, y, z, w). Shape is (N, M, 4).""" + raise NotImplementedError + + @property + @abstractmethod + def target_pose_w(self) -> torch.Tensor: + """Pose of the target frame(s) after offset in world frame. Shape is (N, M, 7). Quaternion in xyzw order.""" + raise NotImplementedError + + @property + @abstractmethod + def target_pos_w(self) -> torch.Tensor: + """Position of the target frame(s) after offset in world frame. Shape is (N, M, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def target_quat_w(self) -> torch.Tensor: + """Orientation of the target frame(s) after offset in world frame (x, y, z, w). Shape is (N, M, 4).""" + raise NotImplementedError + + @property + @abstractmethod + def source_pose_w(self) -> torch.Tensor: + """Pose of the source frame after offset in world frame. Shape is (N, 7). Quaternion in xyzw order.""" + raise NotImplementedError + + @property + @abstractmethod + def source_pos_w(self) -> torch.Tensor: + """Position of the source frame after offset in world frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def source_quat_w(self) -> torch.Tensor: + """Orientation of the source frame after offset in world frame (x, y, z, w). 
Shape is (N, 4).""" + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py index d16aa6a3eb4..f01ce2e3f64 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py @@ -5,552 +5,23 @@ from __future__ import annotations -import logging -import re -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch +from isaaclab.utils.backend_utils import FactoryBase -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.string as string_utils -from isaaclab.markers import VisualizationMarkers -from isaaclab.utils.math import ( - combine_frame_transforms, - is_identity_pose, - normalize, - quat_from_angle_axis, - subtract_frame_transforms, -) - -from ..sensor_base import SensorBase -from .frame_transformer_data import FrameTransformerData +from .base_frame_transformer import BaseFrameTransformer +from .base_frame_transformer_data import BaseFrameTransformerData if TYPE_CHECKING: - from .frame_transformer_cfg import FrameTransformerCfg - -# import logger -logger = logging.getLogger(__name__) - - -class FrameTransformer(SensorBase): - """A sensor for reporting frame transforms. - - This class provides an interface for reporting the transform of one or more frames (target frames) - with respect to another frame (source frame). The source frame is specified by the user as a prim path - (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of - prim paths (:attr:`FrameTransformerCfg.target_frames`). - - The source frame and target frames are assumed to be rigid bodies. 
The transform of the target frames - with respect to the source frame is computed by first extracting the transform of the source frame - and target frames from the physics engine and then computing the relative transform between the two. - - Additionally, the user can specify an offset for the source frame and each target frame. This is useful - for specifying the transform of the desired frame with respect to the body's center of mass, for instance. - - A common example of using this sensor is to track the position and orientation of the end effector of a - robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the - manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is - typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the - manipulator. - - """ - - cfg: FrameTransformerCfg - """The configuration parameters.""" - - def __init__(self, cfg: FrameTransformerCfg): - """Initializes the frame transformer object. - - Args: - cfg: The configuration parameters. 
- """ - # initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data: FrameTransformerData = FrameTransformerData() - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"FrameTransformer @ '{self.cfg.prim_path}': \n" - f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" - f"\tnumber of envs: {self._num_envs}\n" - f"\tsource body frame: {self._source_frame_body_name}\n" - f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" - ) - - """ - Properties - """ - - @property - def data(self) -> FrameTransformerData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_bodies(self) -> int: - """Returns the number of target bodies being tracked. - - Note: - This is an alias used for consistency with other sensors. Otherwise, we recommend using - :attr:`len(data.target_frame_names)` to access the number of target frames. - """ - return len(self._target_frame_body_names) - - @property - def body_names(self) -> list[str]: - """Returns the names of the target bodies being tracked. - - Note: - This is an alias used for consistency with other sensors. Otherwise, we recommend using - :attr:`data.target_frame_names` to access the target frame names. - """ - return self._target_frame_body_names - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = ... - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. 
- preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self._target_frame_names, preserve_order) - - """ - Implementation. - """ - - def _initialize_impl(self): - super()._initialize_impl() - - # resolve source frame offset - source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) - source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) - # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is - # not the identity quaternion for efficiency in _update_buffer_impl - self._apply_source_frame_offset = True - # Handle source frame offsets - if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): - logger.debug(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") - self._apply_source_frame_offset = False - else: - logger.debug(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") - # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) - self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) - self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) - - # Keep track of mapping from the rigid body name to the desired frames and prim path, - # as there may be multiple frames based upon the same body name and we don't want to - # create unnecessary views. 
- body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} - # The offsets associated with each target frame - target_offsets: dict[str, dict[str, torch.Tensor]] = {} - # The frames whose offsets are not identity - non_identity_offset_frames: list[str] = [] - - # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the - # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl - self._apply_target_frame_offset = False - - # Need to keep track of whether the source frame is also a target frame - self._source_is_also_target_frame = False - - # Collect all target frames, their associated body prim paths and their offsets so that we can extract - # the prim, check that it has the appropriate rigid body API in a single loop. - # First element is None because user can't specify source frame name - frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] - frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] - # First element is None because source frame offset is handled separately - frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] - frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) - for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): - # Find correct prim - matching_prims = sim_utils.find_matching_prims(prim_path) - if len(matching_prims) == 0: - raise ValueError( - f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." - " No matching prims were found." 
- ) - for prim in matching_prims: - # Get the prim path of the matching prim - matching_prim_path = prim.GetPath().pathString - # Check if it is a rigid prim - if not prim.HasAPI(UsdPhysics.RigidBodyAPI): - raise ValueError( - f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" - " rigid body. The class only supports transformations between rigid bodies." - ) - - # Get the name of the body: use relative prim path for unique identification - body_name = self._get_relative_body_path(matching_prim_path) - # Use leaf name of prim path if frame name isn't specified by user - frame_name = frame if frame is not None else matching_prim_path.rsplit("/", 1)[-1] - - # Keep track of which frames are associated with which bodies - if body_name in body_names_to_frames: - body_names_to_frames[body_name]["frames"].add(frame_name) - - # This is a corner case where the source frame is also a target frame - if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": - self._source_is_also_target_frame = True - - else: - # Store the first matching prim path and the type of frame - body_names_to_frames[body_name] = { - "frames": {frame_name}, - "prim_path": matching_prim_path, - "type": frame_type, - } - - if offset is not None: - offset_pos = torch.tensor(offset.pos, device=self.device) - offset_quat = torch.tensor(offset.rot, device=self.device) - # Check if we need to apply offsets (optimized code path in _update_buffer_impl) - if not is_identity_pose(offset_pos, offset_quat): - non_identity_offset_frames.append(frame_name) - self._apply_target_frame_offset = True - target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} - - if not self._apply_target_frame_offset: - logger.info( - f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" - f" are identity: {frames[1:]}" - ) - else: - logger.info( - f"Offsets application needed from '{self.cfg.prim_path}' to the following target 
frames:" - f" {non_identity_offset_frames}" - ) - - # The names of bodies that RigidPrim will be tracking to later extract transforms from - tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] - tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] - - body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] - - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # Create a prim view for all frames and initialize it - # order of transforms coming out of view will be source frame followed by target frame(s) - self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) - - # Determine the order in which regex evaluated body names so we can later index into frame transforms - # by frame name correctly - all_prim_paths = self._frame_physx_view.prim_paths - - if "env_" in all_prim_paths[0]: - - def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: - """Separates the environment number and prim_path from the item. - - Args: - item: The item to extract the environment number from. Assumes item is of the form - `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. - Returns: - The environment number and the prim_path. - """ - match = re.search(r"env_(\d+)(.*)", item) - return (int(match.group(1)), match.group(2)) - - # Find the indices that would reorganize output to be per environment. 
- # We want `env_1/blah` to come before `env_11/blah` and env_1/Robot/base - # to come before env_1/Robot/foot so we need to use custom key function - self._per_env_indices = [ - index - for index, _ in sorted( - list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) - ) - ] - - # Only need 0th env as the names and their ordering are the same across environments - sorted_prim_paths = [ - all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] - ] - - else: - # If no environment is present, then the order of the body names is the same as the order of the - # prim paths sorted alphabetically - self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] - sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] - - # -- target frames: use relative prim path for unique identification - self._target_frame_body_names = [self._get_relative_body_path(prim_path) for prim_path in sorted_prim_paths] - - # -- source frame: use relative prim path for unique identification - self._source_frame_body_name = self._get_relative_body_path(self.cfg.prim_path) - source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) - - # Only remove source frame from tracked bodies if it is not also a target frame - if not self._source_is_also_target_frame: - self._target_frame_body_names.remove(self._source_frame_body_name) - - # Determine indices into all tracked body frames for both source and target frames - all_ids = torch.arange(self._num_envs * len(tracked_body_names)) - self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index - - # If source frame is also a target frame, then the target frame body ids are the same as - # the source frame body ids - if self._source_is_also_target_frame: - self._target_frame_body_ids = all_ids - else: - self._target_frame_body_ids = 
all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] - - # The name of each of the target frame(s) - either user specified or defaulted to the body name - self._target_frame_names: list[str] = [] - # The position and rotation components of target frame offsets - target_frame_offset_pos = [] - target_frame_offset_quat = [] - # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed - # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations - # when updating sensor in _update_buffers_impl - duplicate_frame_indices = [] - - # Go through each body name and determine the number of duplicates we need for that frame - # and extract the offsets. This is all done to handle the case where multiple frames - # reference the same body, but have different names and/or offsets - for i, body_name in enumerate(self._target_frame_body_names): - for frame in body_names_to_frames[body_name]["frames"]: - # Only need to handle target frames here as source frame is handled separately - if frame in target_offsets: - target_frame_offset_pos.append(target_offsets[frame]["pos"]) - target_frame_offset_quat.append(target_offsets[frame]["quat"]) - self._target_frame_names.append(frame) - duplicate_frame_indices.append(i) - - # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes - # [0, 1, 1, 2, 3, 4, 4, 5]. 
Again, this is a optimization to make _update_buffer_impl more efficient - duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) - if self._source_is_also_target_frame: - num_target_body_frames = len(tracked_body_names) - else: - num_target_body_frames = len(tracked_body_names) - 1 - - self._duplicate_frame_indices = torch.cat( - [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] - ) - - # Target frame offsets are only applied if at least one of the offsets are non-identity - if self._apply_target_frame_offset: - # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) - self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) - self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) - - # fill the data buffer - self._data.target_frame_names = self._target_frame_names - self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device) - self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device) - self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device) - self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device) - self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w) - self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = ... 
- - # Extract transforms from view - shape is: - # (the total number of source and target body frames being tracked * self._num_envs, 7) - transforms = self._frame_physx_view.get_transforms() - - # Reorder the transforms to be per environment as is expected of SensorData - transforms = transforms[self._per_env_indices] - - # Process source frame transform - source_frames = transforms[self._source_frame_body_ids] - # Only apply offset if the offsets will result in a coordinate frame transform - if self._apply_source_frame_offset: - source_pos_w, source_quat_w = combine_frame_transforms( - source_frames[:, :3], - source_frames[:, 3:], - self._source_frame_offset_pos, - self._source_frame_offset_quat, - ) - else: - source_pos_w = source_frames[:, :3] - source_quat_w = source_frames[:, 3:] - - # Process target frame transforms - target_frames = transforms[self._target_frame_body_ids] - duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] - duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] - - # Only apply offset if the offsets will result in a coordinate frame transform - if self._apply_target_frame_offset: - target_pos_w, target_quat_w = combine_frame_transforms( - duplicated_target_frame_pos_w, - duplicated_target_frame_quat_w, - self._target_frame_offset_pos, - self._target_frame_offset_quat, - ) - else: - target_pos_w = duplicated_target_frame_pos_w - target_quat_w = duplicated_target_frame_quat_w - - # Compute the transform of the target frame with respect to the source frame - total_num_frames = len(self._target_frame_names) - target_pos_source, target_quat_source = subtract_frame_transforms( - source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3), - source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), - target_pos_w, - target_quat_w, - ) - - # Update buffers - # note: The frame names / ordering don't change so no need to update them after initialization - 
self._data.source_pos_w[:] = source_pos_w.view(-1, 3) - self._data.source_quat_w[:] = source_quat_w.view(-1, 4) - self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) - self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) - self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) - self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) - - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility - if debug_vis: - if not hasattr(self, "frame_visualizer"): - self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - - # set their visibility to true - self.frame_visualizer.set_visibility(True) - else: - if hasattr(self, "frame_visualizer"): - self.frame_visualizer.set_visibility(False) - - def _debug_vis_callback(self, event): - # Get the all frames pose - frames_pos = torch.cat([self._data.source_pos_w, self._data.target_pos_w.view(-1, 3)], dim=0) - frames_quat = torch.cat([self._data.source_quat_w, self._data.target_quat_w.view(-1, 4)], dim=0) - - # Get the all connecting lines between frames pose - lines_pos, lines_quat, lines_length = self._get_connecting_lines( - start_pos=self._data.source_pos_w.repeat_interleave(self._data.target_pos_w.size(1), dim=0), - end_pos=self._data.target_pos_w.view(-1, 3), - ) - - # Initialize default (identity) scales and marker indices for all markers (frames + lines) - marker_scales = torch.ones(frames_pos.size(0) + lines_pos.size(0), 3) - marker_indices = torch.zeros(marker_scales.size(0)) - - # Set the z-scale of line markers to represent their actual length - marker_scales[-lines_length.size(0) :, -1] = lines_length - - # Assign marker config index 1 to line markers - marker_indices[-lines_length.size(0) :] = 1 - - # Update the frame and the connecting line visualizer - self.frame_visualizer.visualize( - translations=torch.cat((frames_pos, 
lines_pos), dim=0), - orientations=torch.cat((frames_quat, lines_quat), dim=0), - scales=marker_scales, - marker_indices=marker_indices, - ) - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._frame_physx_view = None - - """ - Internal helpers. - """ - - def _get_connecting_lines( - self, start_pos: torch.Tensor, end_pos: torch.Tensor - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Draws connecting lines between frames. - - Given start and end points, this function computes the positions (mid-point), orientations, - and lengths of the connecting lines. - - Args: - start_pos: The start positions of the connecting lines. Shape is (N, 3). - end_pos: The end positions of the connecting lines. Shape is (N, 3). - - Returns: - A tuple containing: - - The positions of each connecting line. Shape is (N, 3). - - The orientations of each connecting line in quaternion. Shape is (N, 4). - - The lengths of each connecting line. Shape is (N,). 
- """ - direction = end_pos - start_pos - lengths = torch.norm(direction, dim=-1) - positions = (start_pos + end_pos) / 2 - - # Get default direction (along z-axis) - default_direction = torch.tensor([0.0, 0.0, 1.0], device=self.device).expand(start_pos.size(0), -1) - - # Normalize direction vector - direction_norm = normalize(direction) - - # Calculate rotation from default direction to target direction - rotation_axis = torch.linalg.cross(default_direction, direction_norm) - rotation_axis_norm = torch.norm(rotation_axis, dim=-1) - - # Handle case where vectors are parallel - mask = rotation_axis_norm > 1e-6 - rotation_axis = torch.where( - mask.unsqueeze(-1), - normalize(rotation_axis), - torch.tensor([1.0, 0.0, 0.0], device=self.device).expand(start_pos.size(0), -1), - ) - - # Calculate rotation angle - cos_angle = torch.sum(default_direction * direction_norm, dim=-1) - cos_angle = torch.clamp(cos_angle, -1.0, 1.0) - angle = torch.acos(cos_angle) - orientations = quat_from_angle_axis(angle, rotation_axis) - - return positions, orientations, lengths - - @staticmethod - def _get_relative_body_path(prim_path: str) -> str: - """Extract a normalized body path from a prim path. + from isaaclab_physx.sensors.frame_transformer import FrameTransformer as PhysXFrameTransformer + from isaaclab_physx.sensors.frame_transformer import FrameTransformerData as PhysXFrameTransformerData - Removes the environment instance segment `/envs/env_/` to normalize paths - across multiple environments, while preserving the `/envs/` prefix to - distinguish environment-scoped paths from non-environment paths. 
- Examples: - - '/World/envs/env_0/Robot/torso' -> '/World/envs/Robot/torso' - - '/World/envs/env_123/Robot/left_hand' -> '/World/envs/Robot/left_hand' - - '/World/Robot' -> '/World/Robot' - - '/World/Robot_2/left_hand' -> '/World/Robot_2/left_hand' +class FrameTransformer(FactoryBase, BaseFrameTransformer): + """Factory for creating frame transformer instances.""" - Args: - prim_path: The full prim path. + data: BaseFrameTransformerData | PhysXFrameTransformerData - Returns: - The prim path with `/envs/env_/` removed, preserving `/envs/`. - """ - pattern = re.compile(r"/envs/env_[^/]+/") - return pattern.sub("/envs/", prim_path) + def __new__(cls, *args, **kwargs) -> BaseFrameTransformer | PhysXFrameTransformer: + """Create a new instance of a frame transformer based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py index 04012624209..641bb386354 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py @@ -71,6 +71,6 @@ class FrameCfg: visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") """The configuration object for the visualization markers. Defaults to FRAME_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. 
""" diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py index 449065021a3..8f202e15f40 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py @@ -3,55 +3,23 @@ # # SPDX-License-Identifier: BSD-3-Clause -from dataclasses import dataclass +"""Factory for frame transformer data, kept at this import path for backwards compatibility.""" -import torch +from __future__ import annotations +from typing import TYPE_CHECKING -@dataclass -class FrameTransformerData: - """Data container for the frame transformer sensor.""" +from isaaclab.utils.backend_utils import FactoryBase - target_frame_names: list[str] = None - """Target frame names (this denotes the order in which that frame data is ordered). +from .base_frame_transformer_data import BaseFrameTransformerData - The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. - This does not necessarily follow the order in which the frames are defined in the config due to - the regex matching. - """ +if TYPE_CHECKING: + from isaaclab_physx.sensors.frame_transformer import FrameTransformerData as PhysXFrameTransformerData - target_pos_source: torch.Tensor = None - """Position of the target frame(s) relative to source frame. - Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. - """ +class FrameTransformerData(FactoryBase, BaseFrameTransformerData): + """Factory for creating frame transformer data instances.""" - target_quat_source: torch.Tensor = None - """Orientation of the target frame(s) relative to source frame quaternion (x, y, z, w). - - Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. 
- """ - - target_pos_w: torch.Tensor = None - """Position of the target frame(s) after offset (in world frame). - - Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. - """ - - target_quat_w: torch.Tensor = None - """Orientation of the target frame(s) after offset (in world frame) quaternion (x, y, z, w). - - Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. - """ - - source_pos_w: torch.Tensor = None - """Position of the source frame after offset (in world frame). - - Shape is (N, 3), where N is the number of environments. - """ - - source_quat_w: torch.Tensor = None - """Orientation of the source frame after offset (in world frame) quaternion (x, y, z, w). - - Shape is (N, 4), where N is the number of environments. - """ + def __new__(cls, *args, **kwargs) -> BaseFrameTransformerData | PhysXFrameTransformerData: + """Create a new instance of a frame transformer data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/imu/__init__.py b/source/isaaclab/isaaclab/sensors/imu/__init__.py index 7501e41cf49..2f372fade3a 100644 --- a/source/isaaclab/isaaclab/sensors/imu/__init__.py +++ b/source/isaaclab/isaaclab/sensors/imu/__init__.py @@ -7,6 +7,10 @@ Imu Sensor """ +from .base_imu import BaseImu +from .base_imu_data import BaseImuData from .imu import Imu from .imu_cfg import ImuCfg from .imu_data import ImuData + +__all__ = ["BaseImu", "BaseImuData", "Imu", "ImuCfg", "ImuData"] diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu.py b/source/isaaclab/isaaclab/sensors/imu/base_imu.py new file mode 100644 index 00000000000..d3d87eb4937 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu.py @@ -0,0 +1,86 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +from ..sensor_base import SensorBase +from .base_imu_data import BaseImuData + +if TYPE_CHECKING: + from .imu_cfg import ImuCfg + + +class BaseImu(SensorBase): + """The Inertial Measurement Unit (IMU) sensor. + + The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame + linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular + accelerations/velocities. + + If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. + The fixed transform from that ancestor to the target prim is computed once during initialization and + composed with the configured sensor offset. + + .. note:: + + We are computing the accelerations using numerical differentiation from the velocities. Consequently, the + IMU sensor accuracy depends on the chosen PhysX timestep. For sufficient accuracy, we recommend keeping the + simulation rate at 200 Hz or higher. + + .. note:: + + The user can configure the sensor offset in the configuration file. The offset is applied relative to the + rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform + from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. + The offset is defined as a position vector and a quaternion rotation, which + are applied in the order: position, then rotation. The position is applied as a translation + in the body frame of the rigid source prim, and the rotation is applied as a rotation + in the body frame of the rigid source prim. 
+ + """ + + cfg: ImuCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the IMU sensor.""" + + def __init__(self, cfg: ImuCfg): + """Initializes the Imu sensor. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseImuData: + raise NotImplementedError + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + Subclasses should call ``super()._initialize_impl()`` first to initialize + the common sensor infrastructure from :class:`SensorBase`. + """ + super()._initialize_impl() + + @abstractmethod + def _update_buffers_impl(self, env_ids: Sequence[int]): + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py new file mode 100644 index 00000000000..f929406d929 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py @@ -0,0 +1,68 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for IMU sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +import torch + + +class BaseImuData(ABC): + """Data container for the Imu sensor. + + This base class defines the interface for IMU sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + """ + + @property + @abstractmethod + def pose_w(self) -> torch.Tensor: + """Pose of the sensor origin in world frame. Shape is (N, 7). 
Quaternion in xyzw order.""" + raise NotImplementedError + + @property + @abstractmethod + def pos_w(self) -> torch.Tensor: + """Position of the sensor origin in world frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def quat_w(self) -> torch.Tensor: + """Orientation of the sensor origin in quaternion (x, y, z, w) in world frame. Shape is (N, 4).""" + raise NotImplementedError + + @property + @abstractmethod + def projected_gravity_b(self) -> torch.Tensor: + """Gravity direction unit vector projected onto the IMU frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def lin_vel_b(self) -> torch.Tensor: + """IMU frame linear velocity relative to the world expressed in IMU frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def ang_vel_b(self) -> torch.Tensor: + """IMU frame angular velocity relative to the world expressed in IMU frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def lin_acc_b(self) -> torch.Tensor: + """IMU frame linear acceleration relative to the world expressed in IMU frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def ang_acc_b(self) -> torch.Tensor: + """IMU frame angular acceleration relative to the world expressed in IMU frame. 
Shape is (N, 3).""" + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/imu/imu.py b/source/isaaclab/isaaclab/sensors/imu/imu.py index 2cfa68302a9..5fdc1c81453 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu.py @@ -5,294 +5,23 @@ from __future__ import annotations -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch +from isaaclab.utils.backend_utils import FactoryBase -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdGeom, UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -from isaaclab.markers import VisualizationMarkers - -from ..sensor_base import SensorBase -from .imu_data import ImuData +from .base_imu import BaseImu +from .base_imu_data import BaseImuData if TYPE_CHECKING: - from .imu_cfg import ImuCfg - - -class Imu(SensorBase): - """The Inertia Measurement Unit (IMU) sensor. - - The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame - linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular - accelerations/velocities. - - If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. - The fixed transform from that ancestor to the target prim is computed once during initialization and - composed with the configured sensor offset. - - .. note:: - - We are computing the accelerations using numerical differentiation from the velocities. Consequently, the - IMU sensor accuracy depends on the chosen phsyx timestep. For a sufficient accuracy, we recommend to keep the - timestep at least as 200Hz. - - .. note:: - - The user can configure the sensor offset in the configuration file. The offset is applied relative to the - rigid source prim. 
If the target prim is not a rigid body, the offset is composed with the fixed transform - from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. - The offset is defined as a position vector and a quaternion rotation, which - are applied in the order: position, then rotation. The position is applied as a translation - in the body frame of the rigid source prim, and the rotation is applied as a rotation - in the body frame of the rigid source prim. - - """ - - cfg: ImuCfg - """The configuration parameters.""" - - def __init__(self, cfg: ImuCfg): - """Initializes the Imu sensor. - - Args: - cfg: The configuration parameters. - """ - # initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data = ImuData() - - # Internal: expression used to build the rigid body view (may be different from cfg.prim_path) - self._rigid_parent_expr: str | None = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Imu sensor @ '{self.cfg.prim_path}': \n" - f"\tview type : {self._view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of sensors : {self._view.count}\n" - ) - - """ - Properties - """ - - @property - def data(self) -> ImuData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_instances(self) -> int: - return self._view.count - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timestamps - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset accumulative data buffers - self._data.pos_w[env_ids] = 0.0 - self._data.quat_w[env_ids] = 0.0 - self._data.quat_w[env_ids, 3] = 1.0 - self._data.projected_gravity_b[env_ids] = 0.0 - self._data.projected_gravity_b[env_ids, 2] = -1.0 - self._data.lin_vel_b[env_ids] = 0.0 - 
self._data.ang_vel_b[env_ids] = 0.0 - self._data.lin_acc_b[env_ids] = 0.0 - self._data.ang_acc_b[env_ids] = 0.0 - self._prev_lin_vel_w[env_ids] = 0.0 - self._prev_ang_vel_w[env_ids] = 0.0 - - def update(self, dt: float, force_recompute: bool = False): - # save timestamp - self._dt = dt - # execute updating - super().update(dt, force_recompute) - - """ - Implementation. - """ - - def _initialize_impl(self): - """Initializes the sensor handles and internal buffers. - - - If the target prim path is a rigid body, build the view directly on it. - - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor - to the target prim, and build the view on the ancestor expression. - """ - # Initialize parent class - super()._initialize_impl() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # check if the prim at path is a rigid prim - prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if prim is None: - raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") - - # Find the first matching ancestor prim that implements rigid body API - ancestor_prim = sim_utils.get_first_matching_ancestor_prim( - prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) - ) - if ancestor_prim is None: - raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") - # Convert ancestor prim path to expression - if ancestor_prim == prim: - self._rigid_parent_expr = self.cfg.prim_path - fixed_pos_b, fixed_quat_b = None, None - else: - # Convert ancestor prim path to expression - relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString - self._rigid_parent_expr = self.cfg.prim_path.replace(relative_path, "") - # Resolve the relative pose between the target prim and the ancestor prim - fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) - - # Create the 
rigid body view on the ancestor - self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) - - # Get world gravity - gravity = self._physics_sim_view.get_gravity() - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, 1) - - # Create internal buffers - self._initialize_buffers_impl() - - # Compose the configured offset with the fixed ancestor->target transform (done once) - # new_offset = fixed * cfg.offset - # where composition is: p = p_fixed + R_fixed * p_cfg, q = q_fixed * q_cfg - if fixed_pos_b is not None and fixed_quat_b is not None: - # Broadcast fixed transform across instances - fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) - fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) - - cfg_p = self._offset_pos_b.clone() - cfg_q = self._offset_quat_b.clone() - - composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) - composed_q = math_utils.quat_mul(fixed_q, cfg_q) - - self._offset_pos_b = composed_p - self._offset_quat_b = composed_q - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = slice(None) - # world pose of the rigid source (ancestor) from the PhysX view - pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) - - # sensor pose in world: apply composed offset - self._data.pos_w[env_ids] = pos_w + math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids]) - self._data.quat_w[env_ids] = math_utils.quat_mul(quat_w, self._offset_quat_b[env_ids]) - - # COM of rigid source (body frame) - com_pos_b = self._view.get_coms().to(self.device).split([3, 4], dim=-1)[0] - - # Velocities at rigid source COM - lin_vel_w, ang_vel_w 
= self._view.get_velocities()[env_ids].split([3, 3], dim=-1) - - # If sensor offset or COM != link origin, account for angular velocity contribution - lin_vel_w += torch.linalg.cross( - ang_vel_w, math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids] - com_pos_b[env_ids]), dim=-1 - ) - - # numerical derivative (world frame) - lin_acc_w = (lin_vel_w - self._prev_lin_vel_w[env_ids]) / self._dt + self._gravity_bias_w[env_ids] - ang_acc_w = (ang_vel_w - self._prev_ang_vel_w[env_ids]) / self._dt - - # batch rotate world->body using current sensor orientation - dynamics_data = torch.stack((lin_vel_w, ang_vel_w, lin_acc_w, ang_acc_w, self.GRAVITY_VEC_W[env_ids]), dim=0) - dynamics_data_rot = math_utils.quat_apply_inverse(self._data.quat_w[env_ids].repeat(5, 1), dynamics_data).chunk( - 5, dim=0 - ) - # store the velocities. - self._data.lin_vel_b[env_ids] = dynamics_data_rot[0] - self._data.ang_vel_b[env_ids] = dynamics_data_rot[1] - # store the accelerations - self._data.lin_acc_b[env_ids] = dynamics_data_rot[2] - self._data.ang_acc_b[env_ids] = dynamics_data_rot[3] - # store projected gravity - self._data.projected_gravity_b[env_ids] = dynamics_data_rot[4] - - self._prev_lin_vel_w[env_ids] = lin_vel_w - self._prev_ang_vel_w[env_ids] = ang_vel_w + from isaaclab_physx.sensors.imu import Imu as PhysXImu + from isaaclab_physx.sensors.imu import ImuData as PhysXImuData - def _initialize_buffers_impl(self): - """Create buffers for storing data.""" - # data buffers - self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self._device) - self._data.quat_w[:, 3] = 1.0 - self._data.projected_gravity_b = torch.zeros(self._view.count, 3, device=self._device) - self._data.lin_vel_b = torch.zeros_like(self._data.pos_w) - self._data.ang_vel_b = torch.zeros_like(self._data.pos_w) - self._data.lin_acc_b = torch.zeros_like(self._data.pos_w) - self._data.ang_acc_b = torch.zeros_like(self._data.pos_w) - 
self._prev_lin_vel_w = torch.zeros_like(self._data.pos_w) - self._prev_ang_vel_w = torch.zeros_like(self._data.pos_w) - # store sensor offset (applied relative to rigid source). - # This may be composed later with a fixed ancestor->target transform. - self._offset_pos_b = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) - self._offset_quat_b = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) - # set gravity bias - self._gravity_bias_w = torch.tensor(list(self.cfg.gravity_bias), device=self._device).repeat( - self._view.count, 1 - ) +class Imu(FactoryBase, BaseImu): + """Factory for creating IMU sensor instances.""" - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility - if debug_vis: - # create markers if necessary for the first time - if not hasattr(self, "acceleration_visualizer"): - self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.acceleration_visualizer.set_visibility(True) - else: - if hasattr(self, "acceleration_visualizer"): - self.acceleration_visualizer.set_visibility(False) + data: BaseImuData | PhysXImuData - def _debug_vis_callback(self, event): - # safely return if view becomes invalid - # note: this invalidity happens because of isaac sim view callbacks - if self._view is None: - return - # get marker location - # -- base state - base_pos_w = self._data.pos_w.clone() - base_pos_w[:, 2] += 0.5 - # -- resolve the scales - default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale - arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.shape[0], 1) - # get up axis of current stage - up_axis = UsdGeom.GetStageUpAxis(self.stage) - # arrow-direction - quat_opengl = math_utils.quat_from_matrix( - math_utils.create_rotation_matrix_from_view( - self._data.pos_w, - 
self._data.pos_w + math_utils.quat_apply(self._data.quat_w, self._data.lin_acc_b), - up_axis=up_axis, - device=self._device, - ) - ) - quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") - # display markers - self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) + def __new__(cls, *args, **kwargs) -> BaseImu | PhysXImu: + """Create a new instance of an IMU sensor based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/imu/imu_data.py b/source/isaaclab/isaaclab/sensors/imu/imu_data.py index c5b776d050f..6f6ed268ad5 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu_data.py @@ -3,55 +3,23 @@ # # SPDX-License-Identifier: BSD-3-Clause -from __future__ import annotations - -from dataclasses import dataclass - -import torch - - -@dataclass -class ImuData: - """Data container for the Imu sensor.""" - - pos_w: torch.Tensor = None - """Position of the sensor origin in world frame. +"""Re-exports the base IMU data class for backwards compatibility.""" - Shape is (N, 3), where ``N`` is the number of environments. - """ - - quat_w: torch.Tensor = None - """Orientation of the sensor origin in quaternion ``(x, y, z, w)`` in world frame. - - Shape is (N, 4), where ``N`` is the number of environments. - """ - - projected_gravity_b: torch.Tensor = None - """Gravity direction unit vector projected on the imu frame. - - Shape is (N,3), where ``N`` is the number of environments. - """ - - lin_vel_b: torch.Tensor = None - """IMU frame angular velocity relative to the world expressed in IMU frame. +from __future__ import annotations - Shape is (N, 3), where ``N`` is the number of environments. - """ +from typing import TYPE_CHECKING - ang_vel_b: torch.Tensor = None - """IMU frame angular velocity relative to the world expressed in IMU frame. 
+from isaaclab.utils.backend_utils import FactoryBase - Shape is (N, 3), where ``N`` is the number of environments. - """ +from .base_imu_data import BaseImuData - lin_acc_b: torch.Tensor = None - """IMU frame linear acceleration relative to the world expressed in IMU frame. +if TYPE_CHECKING: + from isaaclab_physx.sensors.imu import ImuData as PhysXImuData - Shape is (N, 3), where ``N`` is the number of environments. - """ - ang_acc_b: torch.Tensor = None - """IMU frame angular acceleration relative to the world expressed in IMU frame. +class ImuData(FactoryBase, BaseImuData): + """Factory for creating IMU data instances.""" - Shape is (N, 3), where ``N`` is the number of environments. - """ + def __new__(cls, *args, **kwargs) -> BaseImuData | PhysXImuData: + """Create a new instance of an IMU data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py index f50ba272b70..aa946ffac7b 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py @@ -92,7 +92,7 @@ class PinholeCameraPatternCfg(PatternBaseCfg): Emulates sensor/film width on a camera. - Note: + .. note:: The default value is the horizontal aperture of a 35 mm spherical projector. """ vertical_aperture: float | None = None @@ -194,7 +194,7 @@ class BpearlPatternCfg(PatternBaseCfg): # fmt: on """Vertical ray angles (in degrees). Defaults to a list of 32 angles. - Note: + .. note:: We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles are not evenly spaced. 
""" diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py index 5d57bbcd066..ff6872f5970 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py @@ -35,7 +35,7 @@ class OffsetCfg: mesh_prim_paths: list[str] = MISSING """The list of mesh primitive paths to ray cast against. - Note: + .. note:: Currently, only a single static mesh is supported. We are working on supporting multiple static meshes and dynamic meshes. """ @@ -93,6 +93,6 @@ class OffsetCfg: visualizer_cfg: VisualizationMarkersCfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/RayCaster") """The configuration object for the visualization markers. Defaults to RAY_CASTER_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. """ diff --git a/source/isaaclab/isaaclab/sensors/sensor_base.py b/source/isaaclab/isaaclab/sensors/sensor_base.py index 4ece160bbe5..59c8581463b 100644 --- a/source/isaaclab/isaaclab/sensors/sensor_base.py +++ b/source/isaaclab/isaaclab/sensors/sensor_base.py @@ -294,7 +294,7 @@ def safe_callback(callback_name, event, obj_ref): def _initialize_callback(self, event): """Initializes the scene elements. - Note: + .. note:: PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be called whenever the simulator "plays" from a "stop" state. """ @@ -319,7 +319,7 @@ def _on_prim_deletion(self, prim_path: str) -> None: Args: prim_path: The path to the prim that is being deleted. - Note: + .. note:: This function is called when the prim is deleted. 
""" if prim_path == "/": diff --git a/source/isaaclab_physx/config/extension.toml b/source/isaaclab_physx/config/extension.toml index 7effa8970c0..2ff0f9bff5a 100644 --- a/source/isaaclab_physx/config/extension.toml +++ b/source/isaaclab_physx/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.1.0" +version = "1.0.0" # Description title = "PhysX simulation interfaces for IsaacLab core package" diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst index 39ed35abf14..4b26e1edc9c 100644 --- a/source/isaaclab_physx/docs/CHANGELOG.rst +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -1,6 +1,46 @@ Changelog --------- +1.0.0 (2026-01-28) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_physx.sensors` module containing PhysX-specific sensor implementations: + + * :class:`~isaaclab_physx.sensors.ContactSensor` and :class:`~isaaclab_physx.sensors.ContactSensorData`: + PhysX-specific implementation for contact force sensing. Extends + :class:`~isaaclab.sensors.contact_sensor.BaseContactSensor` with PhysX tensor API for contact + force queries, contact filtering, and contact point tracking. + + * :class:`~isaaclab_physx.sensors.Imu` and :class:`~isaaclab_physx.sensors.ImuData`: + PhysX-specific implementation for inertial measurement unit simulation. Extends + :class:`~isaaclab.sensors.imu.BaseImu` with PhysX rigid body views for velocity and + acceleration computation. + + * :class:`~isaaclab_physx.sensors.FrameTransformer` and :class:`~isaaclab_physx.sensors.FrameTransformerData`: + PhysX-specific implementation for coordinate frame transformations. Extends + :class:`~isaaclab.sensors.frame_transformer.BaseFrameTransformer` with PhysX rigid body views + for efficient frame pose queries. 
+ +* Added PhysX-specific sensor tests moved from ``isaaclab/test/sensors/``: + + * ``test_contact_sensor.py`` + * ``test_imu.py`` + * ``test_frame_transformer.py`` + * ``check_contact_sensor.py`` + * ``check_imu_sensor.py`` + +Deprecated +^^^^^^^^^^ + +* Deprecated the ``pose_w``, ``pos_w``, and ``quat_w`` properties on + :class:`~isaaclab_physx.sensors.ContactSensorData` and :class:`~isaaclab_physx.sensors.ImuData`. + These properties will be removed in a future release. Please use a dedicated sensor (e.g., + :class:`~isaaclab.sensors.FrameTransformer`) to measure sensor poses in world frame. + + 0.1.0 (2026-01-28) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_physx/isaaclab_physx/__init__.py b/source/isaaclab_physx/isaaclab_physx/__init__.py index 68a8a553287..69bbf740a59 100644 --- a/source/isaaclab_physx/isaaclab_physx/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/__init__.py @@ -17,3 +17,6 @@ # Configure the module-level variables __version__ = ISAACLAB_PHYSX_METADATA["package"]["version"] + +# Import sensors module for auto-registration with factory +from . import sensors # noqa: F401, E402 diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index f1264f1c22b..b07c5156cad 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -92,7 +92,7 @@ def num_bodies(self) -> int: def root_view(self) -> physx.SoftBodyView: """Deformable body view for the asset. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._root_physx_view @@ -112,7 +112,7 @@ def material_physx_view(self) -> physx.SoftBodyMaterialView | None: This view is optional and may not be available if the material is not bound to the deformable body. 
If the material is not available, then the material properties will be set to default values. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._material_physx_view @@ -223,7 +223,7 @@ def write_nodal_kinematic_target_to_sim(self, targets: torch.Tensor, env_ids: Se and a flag indicating whether the node is kinematically driven or not. The positions are in the simulation frame. - Note: + .. note:: The flag is set to 0.0 for kinematically driven nodes and 1.0 for free nodes. Args: diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py index d917aef810c..280577baefb 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py @@ -24,6 +24,6 @@ class DeformableObjectCfg(AssetBaseCfg): ) """The configuration object for the visualization markers. Defaults to DEFORMABLE_TARGET_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index 2b529363f7c..fd6c5befeb3 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -96,7 +96,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the asset. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._root_view @@ -142,7 +142,7 @@ def reset(self, env_ids: Sequence[int] | None = None): def write_data_to_sim(self) -> None: """Write external wrench to the simulation. - Note: + .. 
note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index 6cffc4694a3..acc0db866f1 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -128,7 +128,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the rigid object collection. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._root_view @@ -166,7 +166,7 @@ def reset(self, env_ids: torch.Tensor | None = None, object_ids: slice | torch.T def write_data_to_sim(self) -> None: """Write external wrench to the simulation. - Note: + .. note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ @@ -826,7 +826,7 @@ def _on_prim_deletion(self, prim_path: str) -> None: Args: prim_path: The path to the prim that is being deleted. - Note: + .. note:: This function is called when the prim is deleted. """ if prim_path == "/": diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py index fcf02950285..721d182817c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py @@ -40,12 +40,12 @@ class SurfaceGripper(AssetBase): properties of the grippers at runtime. 
Finally, the :func:`set_grippers_command` function should be used to set the desired command for the grippers. - Note: + .. note:: The :func:`set_grippers_command` function does not write to the simulation. The simulation automatically calls :func:`write_data_to_sim` function to write the command to the simulation. Similarly, the update function is called automatically for every simulation step, and does not need to be called by the user. - Note: + .. note:: The SurfaceGripper is only supported on CPU for now. Please set the simulation backend to run on CPU. Use `--device cpu` to run the simulation on CPU. """ @@ -176,7 +176,7 @@ def update(self, dt: float) -> None: - "Closing" --> 0.0 - "Closed" --> 1.0 - Note: + .. note:: We need to do this conversion for every single step of the simulation because the gripper can lose contact with the object if some conditions are met: such as if a large force is applied to the gripped object. """ @@ -251,7 +251,7 @@ def _initialize_impl(self) -> None: ValueError: If the simulation backend is not CPU. RuntimeError: If the Simulation Context is not initialized or if gripper prims are not found. - Note: + .. note:: The SurfaceGripper is only supported on CPU for now. Please set the simulation backend to run on CPU. Use `--device cpu` to run the simulation on CPU. """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py new file mode 100644 index 00000000000..dde43d8ce3f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package containing PhysX-specific sensor implementations.""" + +from .contact_sensor import ContactSensor, ContactSensorData +from .frame_transformer import FrameTransformer, FrameTransformerData +from .imu import Imu, ImuData + +__all__ = [ + "ContactSensor", + "ContactSensorData", + "FrameTransformer", + "FrameTransformerData", + "Imu", + "ImuData", +] diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py new file mode 100644 index 00000000000..e2f5f2b7772 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX rigid contact sensor.""" + +from .contact_sensor import ContactSensor +from .contact_sensor_data import ContactSensorData + +__all__ = ["ContactSensor", "ContactSensorData"] diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py new file mode 100644 index 00000000000..815d931f8cd --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py @@ -0,0 +1,421 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +# Ignore optional memory usage warning globally +# pyright: reportOptionalSubscript=false + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +import carb +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import PhysxSchema + +import isaaclab.sim as sim_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.contact_sensor import BaseContactSensor + +from .contact_sensor_data import ContactSensorData + +if TYPE_CHECKING: + from isaaclab.sensors.contact_sensor import ContactSensorCfg + + +class ContactSensor(BaseContactSensor): + """A PhysX contact reporting sensor. + + The contact sensor reports the normal contact forces on a rigid body in the world frame. + It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. + + To enable the contact reporter on a rigid body, please make sure to enable the + :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your + asset spawner configuration. This will enable the contact reporter on all the rigid bodies + in the asset. + + The sensor can be configured to report the contact forces on a set of bodies with a given + filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful + when you want to report the contact forces between the sensor bodies and a specific set of + bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. + Please check the documentation on `RigidContact`_ for more details. + + The reporting of the filtered contact forces is only possible as one-to-many. This means that only one + sensor body in an environment can be filtered against multiple bodies in that environment. 
If you need to + filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor + body. + + As an example, suppose you want to report the contact forces for all the feet of a robot against an object + exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and + :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` + respectively will not work. Instead, you need to create a separate sensor for each foot and filter + it against the object. + + .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html + .. _RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.api/docs/index.html#isaacsim.core.api.sensors.RigidContactView + """ + + cfg: ContactSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the contact sensor.""" + + def __init__(self, cfg: ContactSensorCfg): + """Initializes the contact sensor object. + + Args: + cfg: The configuration parameters. 
+ """ + # initialize base class + super().__init__(cfg) + + # Enable contact processing + carb_settings_iface = carb.settings.get_settings() + carb_settings_iface.set_bool("/physics/disableContactProcessing", False) + + # Create empty variables for storing output data + self._data: ContactSensorData = ContactSensorData() + # initialize self._body_physx_view for running in extension mode + self._body_physx_view = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Contact sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self.body_physx_view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of bodies : {self.num_bodies}\n" + f"\tbody names : {self.body_names}\n" + ) + + """ + Properties + """ + + @property + def num_instances(self) -> int: + return self.body_physx_view.count + + @property + def data(self) -> ContactSensorData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_bodies(self) -> int: + """Number of bodies with contact sensors attached.""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies with contact sensors attached.""" + prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] + return [path.split("/")[-1] for path in prim_paths] + + @property + def body_physx_view(self) -> physx.RigidBodyView: + """View for the rigid bodies captured (PhysX). + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._body_physx_view + + @property + def contact_view(self) -> physx.RigidContactView: + """Contact reporter view for the bodies (PhysX). + + .. note:: + Use this view with caution. It requires handling of tensors in a specific way. 
+ """ + return self._contact_view + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timers and counters + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = slice(None) + # reset accumulative data buffers + self._data.net_forces_w[env_ids] = 0.0 + self._data.net_forces_w_history[env_ids] = 0.0 + # reset force matrix + if len(self.cfg.filter_prim_paths_expr) != 0: + self._data.force_matrix_w[env_ids] = 0.0 + self._data.force_matrix_w_history[env_ids] = 0.0 + # reset the current air time + if self.cfg.track_air_time: + self._data.current_air_time[env_ids] = 0.0 + self._data.last_air_time[env_ids] = 0.0 + self._data.current_contact_time[env_ids] = 0.0 + self._data.last_contact_time[env_ids] = 0.0 + # reset contact positions + if self.cfg.track_contact_points: + self._data.contact_pos_w[env_ids, :] = torch.nan + # reset friction forces + if self.cfg.track_friction_forces: + self._data.friction_forces_w[env_ids, :] = 0.0 + + """ + Implementation. + """ + + def _initialize_impl(self): + super()._initialize_impl() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # check that only rigid bodies are selected + leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1] + template_prim_path = self._parent_prims[0].GetPath().pathString + body_names = list() + for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern): + # check if prim has contact reporter API + if prim.HasAPI(PhysxSchema.PhysxContactReportAPI): + prim_path = prim.GetPath().pathString + body_names.append(prim_path.rsplit("/", 1)[-1]) + # check that there is at least one body with contact reporter API + if not body_names: + raise RuntimeError( + f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API." + "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration." 
+ ) + + # construct regex expression for the body names + body_names_regex = r"(" + "|".join(body_names) + r")" + body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" + # convert regex expressions to glob expressions for PhysX + body_names_glob = body_names_regex.replace(".*", "*") + filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr] + + # create a rigid prim view for the sensor + self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) + self._contact_view = self._physics_sim_view.create_rigid_contact_view( + body_names_glob, + filter_patterns=filter_prim_paths_glob, + max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, + ) + # resolve the true count of bodies + self._num_bodies = self.body_physx_view.count // self._num_envs + # check that contact reporter succeeded + if self._num_bodies != len(body_names): + raise RuntimeError( + "Failed to initialize contact reporter for specified bodies." + f"\n\tInput prim path : {self.cfg.prim_path}" + f"\n\tResolved prim paths: {body_names_regex}" + ) + + # check if filter paths are valid + if self.cfg.track_contact_points or self.cfg.track_friction_forces: + if len(self.cfg.filter_prim_paths_expr) == 0: + raise ValueError( + "The 'filter_prim_paths_expr' is empty. Please specify a valid filter pattern to track" + f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." + ) + if self.cfg.max_contact_data_count_per_prim < 1: + raise ValueError( + f"The 'max_contact_data_count_per_prim' is {self.cfg.max_contact_data_count_per_prim}. " + "Please set it to a value greater than 0 to track" + f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." 
+ ) + + # prepare data buffers + num_filter_shapes = self.contact_view.filter_count if len(self.cfg.filter_prim_paths_expr) != 0 else 0 + self._data.create_buffers( + num_envs=self._num_envs, + num_bodies=self._num_bodies, + num_filter_shapes=num_filter_shapes, + history_length=self.cfg.history_length, + track_pose=self.cfg.track_pose, + track_air_time=self.cfg.track_air_time, + track_contact_points=self.cfg.track_contact_points, + track_friction_forces=self.cfg.track_friction_forces, + device=self._device, + ) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = slice(None) + + # obtain the contact forces + # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). + # This isn't the most efficient way to do this, but it's the easiest to implement. + net_forces_w = self.contact_view.get_net_contact_forces(dt=self._sim_physics_dt) + self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] + # update contact force history + if self.cfg.history_length > 0: + self._data.net_forces_w_history[env_ids] = self._data.net_forces_w_history[env_ids].roll(1, dims=1) + self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids] + + # obtain the contact force matrix + if len(self.cfg.filter_prim_paths_expr) != 0: + # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) + num_filters = self.contact_view.filter_count + # acquire and shape the force matrix + force_matrix_w = self.contact_view.get_contact_force_matrix(dt=self._sim_physics_dt) + force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) + self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] + if self.cfg.history_length > 0: + self._data.force_matrix_w_history[env_ids] = self._data.force_matrix_w_history[env_ids].roll(1, dims=1) + 
self._data.force_matrix_w_history[env_ids, 0] = self._data.force_matrix_w[env_ids] + + # obtain the pose of the sensor origin + if self.cfg.track_pose: + pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids] + self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) + + # obtain contact points + if self.cfg.track_contact_points: + _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = self.contact_view.get_contact_data( + dt=self._sim_physics_dt + ) + self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( + buffer_contact_points, buffer_count, buffer_start_indices + )[env_ids] + + # obtain friction forces + if self.cfg.track_friction_forces: + friction_forces, _, buffer_count, buffer_start_indices = self.contact_view.get_friction_data( + dt=self._sim_physics_dt + ) + self._data.friction_forces_w[env_ids] = self._unpack_contact_buffer_data( + friction_forces, buffer_count, buffer_start_indices, avg=False, default=0.0 + )[env_ids] + + # obtain the air time + if self.cfg.track_air_time: + # -- time elapsed since last update + # since this function is called every frame, we can use the difference to get the elapsed time + elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids] + # -- check contact state of bodies + is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold + is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact + is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact + # -- update the last contact time if body has just become in contact + self._data.last_air_time[env_ids] = torch.where( + is_first_contact, + self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), + self._data.last_air_time[env_ids], + ) + # -- increment time for bodies that are not in contact + self._data.current_air_time[env_ids] = torch.where( + ~is_contact, 
self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 + ) + # -- update the last contact time if body has just detached + self._data.last_contact_time[env_ids] = torch.where( + is_first_detached, + self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), + self._data.last_contact_time[env_ids], + ) + # -- increment time for bodies that are in contact + self._data.current_contact_time[env_ids] = torch.where( + is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 + ) + + def _unpack_contact_buffer_data( + self, + contact_data: torch.Tensor, + buffer_count: torch.Tensor, + buffer_start_indices: torch.Tensor, + avg: bool = True, + default: float = float("nan"), + ) -> torch.Tensor: + """ + Unpacks and aggregates contact data for each (env, body, filter) group. + + This function vectorizes the following nested loop: + + for i in range(self._num_bodies * self._num_envs): + for j in range(self.contact_view.filter_count): + start_index_ij = buffer_start_indices[i, j] + count_ij = buffer_count[i, j] + self._contact_position_aggregate_buffer[i, j, :] = torch.mean( + contact_data[start_index_ij : (start_index_ij + count_ij), :], dim=0 + ) + + For more details, see the `RigidContactView.get_contact_data() documentation `_. + + Args: + contact_data: Flat tensor of contact data, shape (N_envs * N_bodies, 3). + buffer_count: Number of contact points per (env, body, filter), shape (N_envs * N_bodies, N_filters). + buffer_start_indices: Start indices for each (env, body, filter), shape (N_envs * N_bodies, N_filters). + avg: If True, average the contact data for each group; if False, sum the data. Defaults to True. + default: Default value to use for groups with zero contacts. Defaults to NaN. + + Returns: + Aggregated contact data, shape (N_envs, N_bodies, N_filters, 3). 
+ """ + counts, starts = buffer_count.view(-1), buffer_start_indices.view(-1) + n_rows, total = counts.numel(), int(counts.sum()) + agg = torch.full((n_rows, 3), default, device=self._device, dtype=contact_data.dtype) + if total > 0: + row_ids = torch.repeat_interleave(torch.arange(n_rows, device=self._device), counts) + + block_starts = counts.cumsum(0) - counts + deltas = torch.arange(row_ids.numel(), device=counts.device) - block_starts.repeat_interleave(counts) + flat_idx = starts[row_ids] + deltas + + pts = contact_data.index_select(0, flat_idx) + agg = agg.zero_().index_add_(0, row_ids, pts) + agg = agg / counts.clamp_min(1).unsqueeze(-1) if avg else agg + agg[counts == 0] = default + + return agg.view(self._num_envs * self.num_bodies, -1, 3).view( + self._num_envs, self._num_bodies, self.contact_view.filter_count, 3 + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + # create markers if necessary for the first time + if not hasattr(self, "contact_visualizer"): + self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.contact_visualizer.set_visibility(True) + else: + if hasattr(self, "contact_visualizer"): + self.contact_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + # note: this invalidity happens because of isaac sim view callbacks + if self.body_physx_view is None: + return + # marker indices + # 0: contact, 1: no contact + net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1) + marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1) + # check if prim is visualized + if self.cfg.track_pose: + frame_origins: torch.Tensor = self._data.pos_w + else: + pose = self.body_physx_view.get_transforms() + frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3] + # visualize + 
        self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1))
+
+    """
+    Internal simulation callbacks.
+    """
+
+    def _invalidate_initialize_callback(self, event):
+        """Invalidates the scene elements."""
+        # call parent
+        super()._invalidate_initialize_callback(event)
+        # set all existing views to None to invalidate them
+        self._body_physx_view = None
+        self._contact_view = None
diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py
new file mode 100644
index 00000000000..ac882666f76
--- /dev/null
+++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py
@@ -0,0 +1,202 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import logging
+
+import torch
+
+from isaaclab.sensors.contact_sensor import BaseContactSensorData
+
+logger = logging.getLogger(__name__)
+
+
+class ContactSensorData(BaseContactSensorData):
+    """Data container for the PhysX contact reporting sensor."""
+
+    @property
+    def pose_w(self) -> torch.Tensor | None:
+        """Pose of the sensor origin in world frame. Shape is (N, 7). Quaternion in wxyz order."""
+        logger.warning(
+            "The `pose_w` property will be deprecated in a future release. Please use a dedicated sensor to measure"
+            " sensor poses in world frame."
+        )
+        return torch.cat([self._pos_w, self._quat_w], dim=-1)
+
+    @property
+    def pos_w(self) -> torch.Tensor | None:
+        """Position of the sensor origin in world frame. Shape is (N, 3).
+
+        None if :attr:`ContactSensorCfg.track_pose` is False.
+        """
+        logger.warning(
+            "The `pos_w` property will be deprecated in a future release. Please use a dedicated sensor to measure"
+            " sensor positions in world frame."
+        )
+        return self._pos_w
+
+    @property
+    def quat_w(self) -> torch.Tensor | None:
+        """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. Shape is (N, 4).
+
+        None if :attr:`ContactSensorCfg.track_pose` is False.
+        """
+        logger.warning(
+            "The `quat_w` property will be deprecated in a future release. Please use a dedicated sensor to measure"
+            " sensor orientations in world frame."
+        )
+        return self._quat_w
+
+    @property
+    def net_forces_w(self) -> torch.Tensor | None:
+        """The net normal contact forces in world frame. Shape is (N, B, 3)."""
+        return self._net_forces_w
+
+    @property
+    def net_forces_w_history(self) -> torch.Tensor | None:
+        """History of net normal contact forces. Shape is (N, T, B, 3)."""
+        return self._net_forces_w_history
+
+    @property
+    def force_matrix_w(self) -> torch.Tensor | None:
+        """Normal contact forces filtered between sensor and filtered bodies. Shape is (N, B, M, 3).
+
+        None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty.
+        """
+        return self._force_matrix_w
+
+    @property
+    def force_matrix_w_history(self) -> torch.Tensor | None:
+        """History of filtered contact forces. Shape is (N, T, B, M, 3).
+
+        None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty.
+        """
+        return self._force_matrix_w_history
+
+    @property
+    def contact_pos_w(self) -> torch.Tensor | None:
+        """Average position of contact points. Shape is (N, B, M, 3).
+
+        None if :attr:`ContactSensorCfg.track_contact_points` is False.
+        """
+        return self._contact_pos_w
+
+    @property
+    def friction_forces_w(self) -> torch.Tensor | None:
+        """Sum of friction forces. Shape is (N, B, M, 3).
+
+        None if :attr:`ContactSensorCfg.track_friction_forces` is False.
+        """
+        return self._friction_forces_w
+
+    @property
+    def last_air_time(self) -> torch.Tensor | None:
+        """Time spent in air before last contact. Shape is (N, B).
+
+        None if :attr:`ContactSensorCfg.track_air_time` is False.
+ """ + return self._last_air_time + + @property + def current_air_time(self) -> torch.Tensor | None: + """Time spent in air since last detach. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + return self._current_air_time + + @property + def last_contact_time(self) -> torch.Tensor | None: + """Time spent in contact before last detach. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + return self._last_contact_time + + @property + def current_contact_time(self) -> torch.Tensor | None: + """Time spent in contact since last contact. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + return self._current_contact_time + + def create_buffers( + self, + num_envs: int, + num_bodies: int, + num_filter_shapes: int, + history_length: int, + track_pose: bool, + track_air_time: bool, + track_contact_points: bool, + track_friction_forces: bool, + device: str, + ) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + num_bodies: Number of bodies per environment. + num_filter_shapes: Number of filtered shapes for force matrix. + history_length: Length of force history buffer. + track_pose: Whether to track sensor pose. + track_air_time: Whether to track air/contact time. + track_contact_points: Whether to track contact points. + track_friction_forces: Whether to track friction forces. + device: Device for tensor storage. 
+ """ + # Net forces (always tracked) + self._net_forces_w = torch.zeros(num_envs, num_bodies, 3, device=device) + if history_length > 0: + self._net_forces_w_history = torch.zeros(num_envs, history_length, num_bodies, 3, device=device) + else: + self._net_forces_w_history = self._net_forces_w.unsqueeze(1) + + # Force matrix (optional - only with filter) + if num_filter_shapes > 0: + self._force_matrix_w = torch.zeros(num_envs, num_bodies, num_filter_shapes, 3, device=device) + if history_length > 0: + self._force_matrix_w_history = torch.zeros( + num_envs, history_length, num_bodies, num_filter_shapes, 3, device=device + ) + else: + self._force_matrix_w_history = self._force_matrix_w.unsqueeze(1) + else: + self._force_matrix_w = None + self._force_matrix_w_history = None + + # Pose tracking (optional) + if track_pose: + self._pos_w = torch.zeros(num_envs, num_bodies, 3, device=device) + self._quat_w = torch.zeros(num_envs, num_bodies, 4, device=device) + else: + self._pos_w = None + self._quat_w = None + + # Air/contact time tracking (optional) + if track_air_time: + self._last_air_time = torch.zeros(num_envs, num_bodies, device=device) + self._current_air_time = torch.zeros(num_envs, num_bodies, device=device) + self._last_contact_time = torch.zeros(num_envs, num_bodies, device=device) + self._current_contact_time = torch.zeros(num_envs, num_bodies, device=device) + else: + self._last_air_time = None + self._current_air_time = None + self._last_contact_time = None + self._current_contact_time = None + + # Contact points (optional) + if track_contact_points: + self._contact_pos_w = torch.full((num_envs, num_bodies, num_filter_shapes, 3), torch.nan, device=device) + else: + self._contact_pos_w = None + + # Friction forces (optional) + if track_friction_forces: + self._friction_forces_w = torch.zeros(num_envs, num_bodies, num_filter_shapes, 3, device=device) + else: + self._friction_forces_w = None diff --git 
a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py new file mode 100644 index 00000000000..dde1954a4ad --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX frame transformer sensor.""" + +from .frame_transformer import FrameTransformer +from .frame_transformer_data import FrameTransformerData + +__all__ = ["FrameTransformer", "FrameTransformerData"] diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py new file mode 100644 index 00000000000..28d45b67f91 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py @@ -0,0 +1,545 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.frame_transformer import BaseFrameTransformer +from isaaclab.utils.math import ( + combine_frame_transforms, + is_identity_pose, + normalize, + quat_from_angle_axis, + subtract_frame_transforms, +) + +from .frame_transformer_data import FrameTransformerData + +if TYPE_CHECKING: + from isaaclab.sensors.frame_transformer import FrameTransformerCfg + +# import logger +logger = logging.getLogger(__name__) + + +class FrameTransformer(BaseFrameTransformer): + """A PhysX sensor for reporting frame transforms. + + This class provides an interface for reporting the transform of one or more frames (target frames) + with respect to another frame (source frame). The source frame is specified by the user as a prim path + (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of + prim paths (:attr:`FrameTransformerCfg.target_frames`). + + The source frame and target frames are assumed to be rigid bodies. The transform of the target frames + with respect to the source frame is computed by first extracting the transform of the source frame + and target frames from the physics engine and then computing the relative transform between the two. + + Additionally, the user can specify an offset for the source frame and each target frame. This is useful + for specifying the transform of the desired frame with respect to the body's center of mass, for instance. + + A common example of using this sensor is to track the position and orientation of the end effector of a + robotic manipulator. 
In this case, the source frame would be the body corresponding to the base frame of the
+    manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is
+    typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the
+    manipulator.
+
+    """
+
+    cfg: FrameTransformerCfg
+    """The configuration parameters."""
+
+    __backend_name__: str = "physx"
+    """The name of the backend for the frame transformer sensor."""
+
+    def __init__(self, cfg: FrameTransformerCfg):
+        """Initializes the frame transformer object.
+
+        Args:
+            cfg: The configuration parameters.
+        """
+        # initialize base class
+        super().__init__(cfg)
+        # Create empty variables for storing output data
+        self._data: FrameTransformerData = FrameTransformerData()
+
+    def __str__(self) -> str:
+        """Returns: A string containing information about the instance."""
+        return (
+            f"FrameTransformer @ '{self.cfg.prim_path}': \n"
+            f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n"
+            f"\tnumber of envs: {self._num_envs}\n"
+            f"\tsource body frame: {self._source_frame_body_name}\n"
+            f"\ttarget frames (count: {len(self._target_frame_names)}): {self._target_frame_names}\n"
+        )
+
+    """
+    Properties
+    """
+
+    @property
+    def data(self) -> FrameTransformerData:
+        # update sensors if needed
+        self._update_outdated_buffers()
+        # return the data
+        return self._data
+
+    @property
+    def num_bodies(self) -> int:
+        """Returns the number of target bodies being tracked.
+
+        .. note::
+            This is an alias used for consistency with other sensors. Otherwise, we recommend using
+            :attr:`len(data.target_frame_names)` to access the number of target frames.
+        """
+        return len(self._target_frame_body_names)
+
+    @property
+    def body_names(self) -> list[str]:
+        """Returns the names of the target bodies being tracked.
+
+        .. note::
+            This is an alias used for consistency with other sensors.
Otherwise, we recommend using + :attr:`data.target_frame_names` to access the target frame names. + """ + return self._target_frame_body_names + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timers and counters + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = ... + + """ + Implementation. + """ + + def _initialize_impl(self): + super()._initialize_impl() + + # resolve source frame offset + source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) + source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) + # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is + # not the identity quaternion for efficiency in _update_buffer_impl + self._apply_source_frame_offset = True + # Handle source frame offsets + if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): + logger.debug(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") + self._apply_source_frame_offset = False + else: + logger.debug(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") + # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) + self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) + self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) + + # Keep track of mapping from the rigid body name to the desired frames and prim path, + # as there may be multiple frames based upon the same body name and we don't want to + # create unnecessary views. 
+ body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} + # The offsets associated with each target frame + target_offsets: dict[str, dict[str, torch.Tensor]] = {} + # The frames whose offsets are not identity + non_identity_offset_frames: list[str] = [] + + # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the + # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl + self._apply_target_frame_offset = False + + # Need to keep track of whether the source frame is also a target frame + self._source_is_also_target_frame = False + + # Collect all target frames, their associated body prim paths and their offsets so that we can extract + # the prim, check that it has the appropriate rigid body API in a single loop. + # First element is None because user can't specify source frame name + frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] + frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] + # First element is None because source frame offset is handled separately + frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] + frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) + for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): + # Find correct prim + matching_prims = sim_utils.find_matching_prims(prim_path) + if len(matching_prims) == 0: + raise ValueError( + f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." + " No matching prims were found." 
+ ) + for prim in matching_prims: + # Get the prim path of the matching prim + matching_prim_path = prim.GetPath().pathString + # Check if it is a rigid prim + if not prim.HasAPI(UsdPhysics.RigidBodyAPI): + raise ValueError( + f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" + " rigid body. The class only supports transformations between rigid bodies." + ) + + # Get the name of the body: use relative prim path for unique identification + body_name = self._get_relative_body_path(matching_prim_path) + # Use leaf name of prim path if frame name isn't specified by user + frame_name = frame if frame is not None else matching_prim_path.rsplit("/", 1)[-1] + + # Keep track of which frames are associated with which bodies + if body_name in body_names_to_frames: + body_names_to_frames[body_name]["frames"].add(frame_name) + + # This is a corner case where the source frame is also a target frame + if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": + self._source_is_also_target_frame = True + + else: + # Store the first matching prim path and the type of frame + body_names_to_frames[body_name] = { + "frames": {frame_name}, + "prim_path": matching_prim_path, + "type": frame_type, + } + + if offset is not None: + offset_pos = torch.tensor(offset.pos, device=self.device) + offset_quat = torch.tensor(offset.rot, device=self.device) + # Check if we need to apply offsets (optimized code path in _update_buffer_impl) + if not is_identity_pose(offset_pos, offset_quat): + non_identity_offset_frames.append(frame_name) + self._apply_target_frame_offset = True + target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} + + if not self._apply_target_frame_offset: + logger.info( + f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" + f" are identity: {frames[1:]}" + ) + else: + logger.info( + f"Offsets application needed from '{self.cfg.prim_path}' to the following target 
frames:" + f" {non_identity_offset_frames}" + ) + + # The names of bodies that RigidPrim will be tracking to later extract transforms from + tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] + tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] + + body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] + + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # Create a prim view for all frames and initialize it + # order of transforms coming out of view will be source frame followed by target frame(s) + self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) + + # Determine the order in which regex evaluated body names so we can later index into frame transforms + # by frame name correctly + all_prim_paths = self._frame_physx_view.prim_paths + + if "env_" in all_prim_paths[0]: + + def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: + """Separates the environment number and prim_path from the item. + + Args: + item: The item to extract the environment number from. Assumes item is of the form + `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. + Returns: + The environment number and the prim_path. + """ + match = re.search(r"env_(\d+)(.*)", item) + return (int(match.group(1)), match.group(2)) + + # Find the indices that would reorganize output to be per environment. 
+ # We want `env_1/blah` to come before `env_11/blah` and env_1/Robot/base + # to come before env_1/Robot/foot so we need to use custom key function + self._per_env_indices = [ + index + for index, _ in sorted( + list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) + ) + ] + + # Only need 0th env as the names and their ordering are the same across environments + sorted_prim_paths = [ + all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] + ] + + else: + # If no environment is present, then the order of the body names is the same as the order of the + # prim paths sorted alphabetically + self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] + sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] + + # -- target frames: use relative prim path for unique identification + self._target_frame_body_names = [self._get_relative_body_path(prim_path) for prim_path in sorted_prim_paths] + + # -- source frame: use relative prim path for unique identification + self._source_frame_body_name = self._get_relative_body_path(self.cfg.prim_path) + source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) + + # Only remove source frame from tracked bodies if it is not also a target frame + if not self._source_is_also_target_frame: + self._target_frame_body_names.remove(self._source_frame_body_name) + + # Determine indices into all tracked body frames for both source and target frames + all_ids = torch.arange(self._num_envs * len(tracked_body_names)) + self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index + + # If source frame is also a target frame, then the target frame body ids are the same as + # the source frame body ids + if self._source_is_also_target_frame: + self._target_frame_body_ids = all_ids + else: + self._target_frame_body_ids = 
all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] + + # The name of each of the target frame(s) - either user specified or defaulted to the body name + self._target_frame_names: list[str] = [] + # The position and rotation components of target frame offsets + target_frame_offset_pos = [] + target_frame_offset_quat = [] + # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed + # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations + # when updating sensor in _update_buffers_impl + duplicate_frame_indices = [] + + # Go through each body name and determine the number of duplicates we need for that frame + # and extract the offsets. This is all done to handle the case where multiple frames + # reference the same body, but have different names and/or offsets + for i, body_name in enumerate(self._target_frame_body_names): + for frame in body_names_to_frames[body_name]["frames"]: + # Only need to handle target frames here as source frame is handled separately + if frame in target_offsets: + target_frame_offset_pos.append(target_offsets[frame]["pos"]) + target_frame_offset_quat.append(target_offsets[frame]["quat"]) + self._target_frame_names.append(frame) + duplicate_frame_indices.append(i) + + # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes + # [0, 1, 1, 2, 3, 4, 4, 5]. 
Again, this is an optimization to make _update_buffers_impl more efficient + duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) + if self._source_is_also_target_frame: + num_target_body_frames = len(tracked_body_names) + else: + num_target_body_frames = len(tracked_body_names) - 1 + + self._duplicate_frame_indices = torch.cat( + [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] + ) + + # Target frame offsets are only applied if at least one of the offsets is non-identity + if self._apply_target_frame_offset: + # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) + self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) + self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) + + # Create data buffers + self._data.create_buffers( + num_envs=self._num_envs, + num_target_frames=len(duplicate_frame_indices), + target_frame_names=self._target_frame_names, + device=self._device, + ) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = ...
+ + # Extract transforms from view - shape is: + # (the total number of source and target body frames being tracked * self._num_envs, 7) + transforms = self._frame_physx_view.get_transforms() + + # Reorder the transforms to be per environment as is expected of SensorData + transforms = transforms[self._per_env_indices] + + # Process source frame transform + source_frames = transforms[self._source_frame_body_ids] + # Only apply offset if the offsets will result in a coordinate frame transform + if self._apply_source_frame_offset: + source_pos_w, source_quat_w = combine_frame_transforms( + source_frames[:, :3], + source_frames[:, 3:], + self._source_frame_offset_pos, + self._source_frame_offset_quat, + ) + else: + source_pos_w = source_frames[:, :3] + source_quat_w = source_frames[:, 3:] + + # Process target frame transforms + target_frames = transforms[self._target_frame_body_ids] + duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] + duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] + + # Only apply offset if the offsets will result in a coordinate frame transform + if self._apply_target_frame_offset: + target_pos_w, target_quat_w = combine_frame_transforms( + duplicated_target_frame_pos_w, + duplicated_target_frame_quat_w, + self._target_frame_offset_pos, + self._target_frame_offset_quat, + ) + else: + target_pos_w = duplicated_target_frame_pos_w + target_quat_w = duplicated_target_frame_quat_w + + # Compute the transform of the target frame with respect to the source frame + total_num_frames = len(self._target_frame_names) + target_pos_source, target_quat_source = subtract_frame_transforms( + source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3), + source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), + target_pos_w, + target_quat_w, + ) + + # Update buffers + # note: The frame names / ordering don't change so no need to update them after initialization + 
self._data.source_pos_w[:] = source_pos_w.view(-1, 3) + self._data.source_quat_w[:] = source_quat_w.view(-1, 4) + self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) + self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) + self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) + self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + if not hasattr(self, "frame_visualizer"): + self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + + # set their visibility to true + self.frame_visualizer.set_visibility(True) + else: + if hasattr(self, "frame_visualizer"): + self.frame_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # Get the all frames pose + frames_pos = torch.cat([self._data.source_pos_w, self._data.target_pos_w.view(-1, 3)], dim=0) + frames_quat = torch.cat([self._data.source_quat_w, self._data.target_quat_w.view(-1, 4)], dim=0) + + # Get the all connecting lines between frames pose + lines_pos, lines_quat, lines_length = self._get_connecting_lines( + start_pos=self._data.source_pos_w.repeat_interleave(self._data.target_pos_w.size(1), dim=0), + end_pos=self._data.target_pos_w.view(-1, 3), + ) + + # Initialize default (identity) scales and marker indices for all markers (frames + lines) + marker_scales = torch.ones(frames_pos.size(0) + lines_pos.size(0), 3) + marker_indices = torch.zeros(marker_scales.size(0)) + + # Set the z-scale of line markers to represent their actual length + marker_scales[-lines_length.size(0) :, -1] = lines_length + + # Assign marker config index 1 to line markers + marker_indices[-lines_length.size(0) :] = 1 + + # Update the frame and the connecting line visualizer + self.frame_visualizer.visualize( + translations=torch.cat((frames_pos, 
lines_pos), dim=0), + orientations=torch.cat((frames_quat, lines_quat), dim=0), + scales=marker_scales, + marker_indices=marker_indices, + ) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._frame_physx_view = None + + """ + Internal helpers. + """ + + def _get_connecting_lines( + self, start_pos: torch.Tensor, end_pos: torch.Tensor + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Draws connecting lines between frames. + + Given start and end points, this function computes the positions (mid-point), orientations, + and lengths of the connecting lines. + + Args: + start_pos: The start positions of the connecting lines. Shape is (N, 3). + end_pos: The end positions of the connecting lines. Shape is (N, 3). + + Returns: + A tuple containing: + - The positions of each connecting line. Shape is (N, 3). + - The orientations of each connecting line in quaternion. Shape is (N, 4). + - The lengths of each connecting line. Shape is (N,). 
+ """ + direction = end_pos - start_pos + lengths = torch.norm(direction, dim=-1) + positions = (start_pos + end_pos) / 2 + + # Get default direction (along z-axis) + default_direction = torch.tensor([0.0, 0.0, 1.0], device=self.device).expand(start_pos.size(0), -1) + + # Normalize direction vector + direction_norm = normalize(direction) + + # Calculate rotation from default direction to target direction + rotation_axis = torch.linalg.cross(default_direction, direction_norm) + rotation_axis_norm = torch.norm(rotation_axis, dim=-1) + + # Handle case where vectors are parallel + mask = rotation_axis_norm > 1e-6 + rotation_axis = torch.where( + mask.unsqueeze(-1), + normalize(rotation_axis), + torch.tensor([1.0, 0.0, 0.0], device=self.device).expand(start_pos.size(0), -1), + ) + + # Calculate rotation angle + cos_angle = torch.sum(default_direction * direction_norm, dim=-1) + cos_angle = torch.clamp(cos_angle, -1.0, 1.0) + angle = torch.acos(cos_angle) + orientations = quat_from_angle_axis(angle, rotation_axis) + + return positions, orientations, lengths + + @staticmethod + def _get_relative_body_path(prim_path: str) -> str: + """Extract a normalized body path from a prim path. + + Removes the environment instance segment `/envs/env_/` to normalize paths + across multiple environments, while preserving the `/envs/` prefix to + distinguish environment-scoped paths from non-environment paths. + + Examples: + - '/World/envs/env_0/Robot/torso' -> '/World/envs/Robot/torso' + - '/World/envs/env_123/Robot/left_hand' -> '/World/envs/Robot/left_hand' + - '/World/Robot' -> '/World/Robot' + - '/World/Robot_2/left_hand' -> '/World/Robot_2/left_hand' + + Args: + prim_path: The full prim path. + + Returns: + The prim path with `/envs/env_/` removed, preserving `/envs/`. 
+ """ + pattern = re.compile(r"/envs/env_[^/]+/") + return pattern.sub("/envs/", prim_path) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py new file mode 100644 index 00000000000..e958433bbb6 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py @@ -0,0 +1,87 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch + +from isaaclab.sensors.frame_transformer import BaseFrameTransformerData + + +class FrameTransformerData(BaseFrameTransformerData): + """Data container for the PhysX frame transformer sensor.""" + + @property + def target_frame_names(self) -> list[str]: + """Target frame names (order matches data ordering).""" + return self._target_frame_names + + @property + def target_pose_source(self) -> torch.Tensor: + """Pose of target frame(s) relative to source frame. Shape is (N, M, 7). Quaternion in wxyz order.""" + return torch.cat([self._target_pos_source, self._target_quat_source], dim=-1) + + @property + def target_pos_source(self) -> torch.Tensor: + """Position of target frame(s) relative to source frame. Shape is (N, M, 3).""" + return self._target_pos_source + + @property + def target_quat_source(self) -> torch.Tensor: + """Orientation of target frame(s) relative to source frame (x, y, z, w). Shape is (N, M, 4).""" + return self._target_quat_source + + @property + def target_pose_w(self) -> torch.Tensor: + """Pose of target frame(s) after offset in world frame. Shape is (N, M, 7). 
Quaternion in wxyz order.""" + return torch.cat([self._target_pos_w, self._target_quat_w], dim=-1) + + @property + def target_pos_w(self) -> torch.Tensor: + """Position of target frame(s) after offset in world frame. Shape is (N, M, 3).""" + return self._target_pos_w + + @property + def target_quat_w(self) -> torch.Tensor: + """Orientation of target frame(s) after offset in world frame (w, x, y, z). Shape is (N, M, 4).""" + return self._target_quat_w + + @property + def source_pose_w(self) -> torch.Tensor: + """Pose of source frame after offset in world frame. Shape is (N, 7). Quaternion in wxyz order.""" + return torch.cat([self._source_pos_w, self._source_quat_w], dim=-1) + + @property + def source_pos_w(self) -> torch.Tensor: + """Position of source frame after offset in world frame. Shape is (N, 3).""" + return self._source_pos_w + + @property + def source_quat_w(self) -> torch.Tensor: + """Orientation of source frame after offset in world frame (w, x, y, z). Shape is (N, 4).""" + return self._source_quat_w + + def create_buffers( + self, + num_envs: int, + num_target_frames: int, + target_frame_names: list[str], + device: str, + ) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + num_target_frames: Number of target frames. + target_frame_names: Names of target frames. + device: Device for tensor storage.
+ """ + self._target_frame_names = target_frame_names + self._source_pos_w = torch.zeros(num_envs, 3, device=device) + self._source_quat_w = torch.zeros(num_envs, 4, device=device) + self._target_pos_w = torch.zeros(num_envs, num_target_frames, 3, device=device) + self._target_quat_w = torch.zeros(num_envs, num_target_frames, 4, device=device) + self._target_pos_source = torch.zeros(num_envs, num_target_frames, 3, device=device) + self._target_quat_source = torch.zeros(num_envs, num_target_frames, 4, device=device) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py new file mode 100644 index 00000000000..d1c50358796 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX IMU sensor.""" + +from .imu import Imu +from .imu_data import ImuData + +__all__ = ["Imu", "ImuData"] diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py new file mode 100644 index 00000000000..1d6e2e96753 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py @@ -0,0 +1,296 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdGeom, UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.imu import BaseImu + +from .imu_data import ImuData + +if TYPE_CHECKING: + from isaaclab.sensors.imu import ImuCfg + + +class Imu(BaseImu): + """The PhysX Inertial Measurement Unit (IMU) sensor. + + The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame + linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular + accelerations/velocities. + + If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. + The fixed transform from that ancestor to the target prim is computed once during initialization and + composed with the configured sensor offset. + + .. note:: + + We are computing the accelerations using numerical differentiation from the velocities. Consequently, the + IMU sensor accuracy depends on the chosen PhysX timestep. For sufficient accuracy, we recommend keeping the + simulation rate at 200Hz or higher. + + .. note:: + + The user can configure the sensor offset in the configuration file. The offset is applied relative to the + rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform + from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. + The offset is defined as a position vector and a quaternion rotation, which + are applied in the order: position, then rotation.
The position is applied as a translation + in the body frame of the rigid source prim, and the rotation is applied as a rotation + in the body frame of the rigid source prim. + + """ + + cfg: ImuCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the IMU sensor.""" + + def __init__(self, cfg: ImuCfg): + """Initializes the Imu sensor. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + # Create empty variables for storing output data + self._data = ImuData() + + # Internal: expression used to build the rigid body view (may be different from cfg.prim_path) + self._rigid_parent_expr: str | None = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Imu sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self._view.count}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> ImuData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_instances(self) -> int: + return self._view.count + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timestamps + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = slice(None) + # reset accumulative data buffers + self._data.pos_w[env_ids] = 0.0 + self._data.quat_w[env_ids] = 0.0 + self._data.quat_w[env_ids, 0] = 1.0 + self._data.projected_gravity_b[env_ids] = 0.0 + self._data.projected_gravity_b[env_ids, 2] = -1.0 + self._data.lin_vel_b[env_ids] = 0.0 + self._data.ang_vel_b[env_ids] = 0.0 + self._data.lin_acc_b[env_ids] = 0.0 + self._data.ang_acc_b[env_ids] = 0.0 + self._prev_lin_vel_w[env_ids] = 0.0 + self._prev_ang_vel_w[env_ids] = 0.0 + + def update(self, dt: float, force_recompute: bool = 
False): + # save timestamp + self._dt = dt + # execute updating + super().update(dt, force_recompute) + + """ + Implementation. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + - If the target prim path is a rigid body, build the view directly on it. + - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor + to the target prim, and build the view on the ancestor expression. + """ + # Initialize parent class + super()._initialize_impl() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # check if the prim at path is a rigid prim + prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if prim is None: + raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") + + # Find the first matching ancestor prim that implements rigid body API + ancestor_prim = sim_utils.get_first_matching_ancestor_prim( + prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) + ) + if ancestor_prim is None: + raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") + # Convert ancestor prim path to expression + if ancestor_prim == prim: + self._rigid_parent_expr = self.cfg.prim_path + fixed_pos_b, fixed_quat_b = None, None + else: + # Convert ancestor prim path to expression + relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString + self._rigid_parent_expr = self.cfg.prim_path.replace(relative_path, "") + # Resolve the relative pose between the target prim and the ancestor prim + fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) + + # Create the rigid body view on the ancestor + self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) + + # Get world gravity + gravity = self._physics_sim_view.get_gravity() + gravity_dir = 
torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, 1) + + # Create internal buffers + self._initialize_buffers_impl() + + # Compose the configured offset with the fixed ancestor->target transform (done once) + # new_offset = fixed * cfg.offset + # where composition is: p = p_fixed + R_fixed * p_cfg, q = q_fixed * q_cfg + if fixed_pos_b is not None and fixed_quat_b is not None: + # Broadcast fixed transform across instances + fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) + fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) + + cfg_p = self._offset_pos_b.clone() + cfg_q = self._offset_quat_b.clone() + + composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) + composed_q = math_utils.quat_mul(fixed_q, cfg_q) + + self._offset_pos_b = composed_p + self._offset_quat_b = composed_q + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = slice(None) + # world pose of the rigid source (ancestor) from the PhysX view + pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) + + # sensor pose in world: apply composed offset + self._data.pos_w[env_ids] = pos_w + math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids]) + self._data.quat_w[env_ids] = math_utils.quat_mul(quat_w, self._offset_quat_b[env_ids]) + + # COM of rigid source (body frame) + com_pos_b = self._view.get_coms().to(self.device).split([3, 4], dim=-1)[0] + + # Velocities at rigid source COM + lin_vel_w, ang_vel_w = self._view.get_velocities()[env_ids].split([3, 3], dim=-1) + + # If sensor offset or COM != link origin, account for angular velocity contribution + lin_vel_w += torch.linalg.cross( + ang_vel_w, math_utils.quat_apply(quat_w, 
self._offset_pos_b[env_ids] - com_pos_b[env_ids]), dim=-1 + ) + + # numerical derivative (world frame) + lin_acc_w = (lin_vel_w - self._prev_lin_vel_w[env_ids]) / self._dt + self._gravity_bias_w[env_ids] + ang_acc_w = (ang_vel_w - self._prev_ang_vel_w[env_ids]) / self._dt + + # batch rotate world->body using current sensor orientation + dynamics_data = torch.stack((lin_vel_w, ang_vel_w, lin_acc_w, ang_acc_w, self.GRAVITY_VEC_W[env_ids]), dim=0) + dynamics_data_rot = math_utils.quat_apply_inverse(self._data.quat_w[env_ids].repeat(5, 1), dynamics_data).chunk( + 5, dim=0 + ) + # store the velocities. + self._data.lin_vel_b[env_ids] = dynamics_data_rot[0] + self._data.ang_vel_b[env_ids] = dynamics_data_rot[1] + # store the accelerations + self._data.lin_acc_b[env_ids] = dynamics_data_rot[2] + self._data.ang_acc_b[env_ids] = dynamics_data_rot[3] + # store projected gravity + self._data.projected_gravity_b[env_ids] = dynamics_data_rot[4] + + self._prev_lin_vel_w[env_ids] = lin_vel_w + self._prev_ang_vel_w[env_ids] = ang_vel_w + + def _initialize_buffers_impl(self): + """Create buffers for storing data.""" + # Create data buffers via data class + self._data.create_buffers(num_envs=self._view.count, device=self._device) + + # Sensor-internal buffers for velocity tracking (not exposed via data) + self._prev_lin_vel_w = torch.zeros(self._view.count, 3, device=self._device) + self._prev_ang_vel_w = torch.zeros(self._view.count, 3, device=self._device) + + # Store sensor offset (applied relative to rigid source). + # This may be composed later with a fixed ancestor->target transform. 
+ self._offset_pos_b = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) + self._offset_quat_b = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) + # Set gravity bias + self._gravity_bias_w = torch.tensor(list(self.cfg.gravity_bias), device=self._device).repeat( + self._view.count, 1 + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + # create markers if necessary for the first time + if not hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.acceleration_visualizer.set_visibility(True) + else: + if hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + # note: this invalidity happens because of isaac sim view callbacks + if self._view is None: + return + # get marker location + # -- base state + base_pos_w = self._data.pos_w.clone() + base_pos_w[:, 2] += 0.5 + # -- resolve the scales + default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale + arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.shape[0], 1) + # get up axis of current stage + up_axis = UsdGeom.GetStageUpAxis(self.stage) + # arrow-direction + quat_opengl = math_utils.quat_from_matrix( + math_utils.create_rotation_matrix_from_view( + self._data.pos_w, + self._data.pos_w + math_utils.quat_apply(self._data.quat_w, self._data.lin_acc_b), + up_axis=up_axis, + device=self._device, + ) + ) + quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") + # display markers + self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) diff --git 
a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py new file mode 100644 index 00000000000..5f20a156930 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py @@ -0,0 +1,86 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging + +import torch + +from isaaclab.sensors.imu import BaseImuData + +logger = logging.getLogger(__name__) + + +class ImuData(BaseImuData): + """Data container for the PhysX Imu sensor.""" + + @property + def pose_w(self) -> torch.Tensor: + """Pose of the sensor origin in world frame. Shape is (N, 7). Quaternion in wxyz order.""" + logger.warning( + "The `pose_w` property will be deprecated in a future release. Please use a dedicated sensor to measure" + " sensor poses in world frame." + ) + return torch.cat((self._pos_w, self._quat_w), dim=-1) + + @property + def pos_w(self) -> torch.Tensor: + """Position of the sensor origin in world frame. Shape is (N, 3).""" + logger.warning( + "The `pos_w` property will be deprecated in a future release. Please use a dedicated sensor to measure" + " sensor positions in world frame." + ) + return self._pos_w + + @property + def quat_w(self) -> torch.Tensor: + """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. Shape is (N, 4).""" + logger.warning( + "The `quat_w` property will be deprecated in a future release. Please use a dedicated sensor to measure" + " sensor orientations in world frame." + ) + return self._quat_w + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Gravity direction unit vector projected on the imu frame.
Shape is (N, 3).""" + return self._projected_gravity_b + + @property + def lin_vel_b(self) -> torch.Tensor: + """IMU frame linear velocity relative to the world expressed in IMU frame. Shape is (N, 3).""" + return self._lin_vel_b + + @property + def ang_vel_b(self) -> torch.Tensor: + """IMU frame angular velocity relative to the world expressed in IMU frame. Shape is (N, 3).""" + return self._ang_vel_b + + @property + def lin_acc_b(self) -> torch.Tensor: + """IMU frame linear acceleration relative to the world expressed in IMU frame. Shape is (N, 3).""" + return self._lin_acc_b + + @property + def ang_acc_b(self) -> torch.Tensor: + """IMU frame angular acceleration relative to the world expressed in IMU frame. Shape is (N, 3).""" + return self._ang_acc_b + + def create_buffers(self, num_envs: int, device: str) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + device: Device for tensor storage. + """ + self._pos_w = torch.zeros(num_envs, 3, device=device) + self._quat_w = torch.zeros(num_envs, 4, device=device) + self._quat_w[:, 0] = 1.0 + self._projected_gravity_b = torch.zeros(num_envs, 3, device=device) + self._lin_vel_b = torch.zeros(num_envs, 3, device=device) + self._ang_vel_b = torch.zeros(num_envs, 3, device=device) + self._lin_acc_b = torch.zeros(num_envs, 3, device=device) + self._ang_acc_b = torch.zeros(num_envs, 3, device=device) diff --git a/source/isaaclab/test/sensors/check_contact_sensor.py b/source/isaaclab_physx/test/sensors/check_contact_sensor.py similarity index 100% rename from source/isaaclab/test/sensors/check_contact_sensor.py rename to source/isaaclab_physx/test/sensors/check_contact_sensor.py diff --git a/source/isaaclab/test/sensors/check_imu_sensor.py b/source/isaaclab_physx/test/sensors/check_imu_sensor.py similarity index 100% rename from source/isaaclab/test/sensors/check_imu_sensor.py rename to source/isaaclab_physx/test/sensors/check_imu_sensor.py diff --git 
a/source/isaaclab/test/sensors/test_contact_sensor.py b/source/isaaclab_physx/test/sensors/test_contact_sensor.py similarity index 98% rename from source/isaaclab/test/sensors/test_contact_sensor.py rename to source/isaaclab_physx/test/sensors/test_contact_sensor.py index ed376f97f2d..468047201ef 100644 --- a/source/isaaclab/test/sensors/test_contact_sensor.py +++ b/source/isaaclab_physx/test/sensors/test_contact_sensor.py @@ -291,8 +291,8 @@ def test_cube_stack_contact_filtering(setup_simulation, device, num_envs): contact_sensor_2 = scene["contact_sensor_2"] # Check that contact processing is enabled - assert contact_sensor.contact_physx_view.filter_count == 1 - assert contact_sensor_2.contact_physx_view.filter_count == 1 + assert contact_sensor.contact_view.filter_count == 1 + assert contact_sensor_2.contact_view.filter_count == 1 # Play the simulation scene.reset() @@ -357,8 +357,8 @@ def test_no_contact_reporting(setup_simulation): contact_sensor_2: ContactSensor = scene["contact_sensor_2"] # Check buffers have the right size - assert contact_sensor.contact_physx_view.filter_count == 1 - assert contact_sensor_2.contact_physx_view.filter_count == 1 + assert contact_sensor.contact_view.filter_count == 1 + assert contact_sensor_2.contact_view.filter_count == 1 # Reset the contact sensors scene.reset() @@ -486,7 +486,7 @@ def test_friction_reporting(setup_simulation, grav_dir): _perform_sim_step(sim, scene, sim_dt) # check that forces are being reported match expected friction forces - expected_friction, _, _, _ = scene["contact_sensor"].contact_physx_view.get_friction_data(dt=sim_dt) + expected_friction, _, _, _ = scene["contact_sensor"].contact_view.get_friction_data(dt=sim_dt) reported_friction = scene["contact_sensor"].data.friction_forces_w[0, 0, :] torch.testing.assert_close(expected_friction.sum(dim=0), reported_friction[0], atol=1e-6, rtol=1e-5) @@ -752,11 +752,11 @@ def _test_friction_forces(shape: RigidObject, sensor: ContactSensor, mode: Conta # compare 
friction forces if mode == ContactTestMode.IN_CONTACT: assert torch.any(torch.abs(sensor._data.friction_forces_w) > 1e-5).item() - friction_forces, _, buffer_count, buffer_start_indices = sensor.contact_physx_view.get_friction_data( + friction_forces, _, buffer_count, buffer_start_indices = sensor.contact_view.get_friction_data( dt=sensor._sim_physics_dt ) for i in range(sensor.num_instances * num_bodies): - for j in range(sensor.contact_physx_view.filter_count): + for j in range(sensor.contact_view.filter_count): start_index_ij = buffer_start_indices[i, j] count_ij = buffer_count[i, j] force = torch.sum(friction_forces[start_index_ij : (start_index_ij + count_ij), :], dim=0) diff --git a/source/isaaclab/test/sensors/test_frame_transformer.py b/source/isaaclab_physx/test/sensors/test_frame_transformer.py similarity index 100% rename from source/isaaclab/test/sensors/test_frame_transformer.py rename to source/isaaclab_physx/test/sensors/test_frame_transformer.py diff --git a/source/isaaclab/test/sensors/test_imu.py b/source/isaaclab_physx/test/sensors/test_imu.py similarity index 100% rename from source/isaaclab/test/sensors/test_imu.py rename to source/isaaclab_physx/test/sensors/test_imu.py diff --git a/source/isaaclab/test/sensors/urdfs/simple_2_link.urdf b/source/isaaclab_physx/test/sensors/urdfs/simple_2_link.urdf similarity index 100% rename from source/isaaclab/test/sensors/urdfs/simple_2_link.urdf rename to source/isaaclab_physx/test/sensors/urdfs/simple_2_link.urdf